repos: - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.2.0 hooks: - id: ruff args: [ --fix ] - id: ruff-format # - repo: https://github.com/codespell-project/codespell # rev: v2.1.0 # hooks: # - id: codespell # args: # - --ignore-words-list=nd,reacher,thist,ths,magent,ba # - --skip=docs/css/termynal.css,docs/js/termynal.js
trl/.pre-commit-config.yaml/0
{ "file_path": "trl/.pre-commit-config.yaml", "repo_id": "trl", "token_count": 212 }
649
#!/bin/bash #SBATCH --job-name=trl #SBATCH --partition=hopper-cpu #SBATCH --ntasks=1 #SBATCH --output=slurm/logs/%x_%j.out sleep 2m bash $BENCHMARK_PLOT_SCRIPT srun python benchmark/post_github_comment.py
trl/benchmark/post_github_comment.sbatch/0
{ "file_path": "trl/benchmark/post_github_comment.sbatch", "repo_id": "trl", "token_count": 90 }
650
# Examples ## Introduction The examples should work in any of the following settings (with the same script): - single GPU - multi GPUs (using PyTorch distributed mode) - multi GPUs (using DeepSpeed ZeRO-Offload stages 1, 2, & 3) - fp16 (mixed-precision), fp32 (normal precision), or bf16 (bfloat16 precision) To run the scripts in any of these modes, first initialize the 🤗 Accelerate configuration with `accelerate config` **NOTE:** to train with a 4-bit or 8-bit model, please run ```bash pip install --upgrade trl[quantization] ``` ## Accelerate Config For all the examples, you'll need to generate a 🤗 Accelerate config file with: ```shell accelerate config # will prompt you to define the training configuration ``` Then, it is encouraged to launch jobs with `accelerate launch`! # Maintained Examples | File | Description | |------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------| | [`examples/scripts/sft.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/sft.py) | This script shows how to use the `SFTTrainer` to fine-tune a model or adapters on a target dataset. | | [`examples/scripts/vsft_llava.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/vsft_llava.py) | This script shows how to use the `SFTTrainer` to fine-tune a Vision Language Model in a chat setting; it has only been tested on a LLaVA 1.5 model, so users may see unexpected behaviour with other model architectures. | | [`examples/scripts/reward_modeling.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/reward_modeling.py) | This script shows how to use the `RewardTrainer` to train a reward model on your own dataset. | | [`examples/scripts/ppo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo.py) | This script shows how to use the `PPOTrainer` to fine-tune a sentiment analysis model using the IMDB dataset. | | [`examples/scripts/ppo_multi_adapter.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo_multi_adapter.py) | This script shows how to use the `PPOTrainer` to train a single base model with multiple adapters. Requires you to run the reward model training example script beforehand. | | [`examples/scripts/stable_diffusion_tuning_example.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/stable_diffusion_tuning_example.py) | This script shows how to use the `DDPOTrainer` to fine-tune a stable diffusion model using reinforcement learning. | Here are also some easier-to-run Colab notebooks that you can use to get started with TRL: | File | Description | |----------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------| | [`examples/notebooks/best_of_n.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/best_of_n.ipynb) | This notebook demonstrates how to use the "Best of N" sampling strategy using TRL when fine-tuning your model with PPO. | | [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-sentiment.ipynb) | This notebook demonstrates how to reproduce the GPT2 IMDB sentiment tuning example in a Jupyter notebook. 
| | [`examples/notebooks/gpt2-control.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-control.ipynb) | This notebook demonstrates how to reproduce the GPT2 sentiment control example in a Jupyter notebook. | We also have some other examples that are less maintained but can be used as a reference: 1. **[research_projects](https://github.com/huggingface/trl/tree/main/examples/research_projects)**: Check out this folder to find the scripts used for some research projects that used TRL (LM de-toxification, Stack-Llama, etc.) ## Distributed training All of the scripts can be run on multiple GPUs by providing the path of an 🤗 Accelerate config file when calling `accelerate launch`. To launch one of them on one or multiple GPUs, run the following command (swapping `{NUM_GPUS}` with the number of GPUs in your machine and `--all_arguments_of_the_script` with your arguments): ```shell accelerate launch --config_file=examples/accelerate_configs/multi_gpu.yaml --num_processes {NUM_GPUS} path_to_script.py --all_arguments_of_the_script ``` You can also adjust the parameters of the 🤗 Accelerate config file to suit your needs (e.g. training in mixed precision). ### Distributed training with DeepSpeed Most of the scripts can be run on multiple GPUs together with DeepSpeed ZeRO-{1,2,3} for efficient sharding of the optimizer states, gradients, and model weights. To do so, run the following command (swapping `{NUM_GPUS}` with the number of GPUs in your machine, `--all_arguments_of_the_script` with your arguments, and `--config_file` with the path to a DeepSpeed config file such as `examples/accelerate_configs/deepspeed_zero1.yaml`): ```shell accelerate launch --config_file=examples/accelerate_configs/deepspeed_zero{1,2,3}.yaml --num_processes {NUM_GPUS} path_to_script.py --all_arguments_of_the_script ```
trl/docs/source/example_overview.md/0
{ "file_path": "trl/docs/source/example_overview.md", "repo_id": "trl", "token_count": 2089 }
651
# Supervised Fine-tuning Trainer Supervised fine-tuning (or SFT for short) is a crucial step in RLHF. In TRL we provide an easy-to-use API to create your SFT models and train them with a few lines of code on your dataset. Check out a complete and flexible example at [`examples/scripts/sft.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/sft.py). Experimental support for Vision Language Models is also included in the example [`examples/scripts/vsft_llava.py`](https://github.com/huggingface/trl/tree/main/examples/scripts/vsft_llava.py). ## Quickstart If you have a dataset hosted on the 🤗 Hub, you can easily fine-tune your SFT model using [`SFTTrainer`] from TRL. Let us assume your dataset is `imdb`, the text you want to predict is inside the `text` field of the dataset, and you want to fine-tune the `facebook/opt-350m` model. The following code snippet takes care of all the data pre-processing and training for you: ```python from datasets import load_dataset from trl import SFTConfig, SFTTrainer dataset = load_dataset("imdb", split="train") sft_config = SFTConfig( dataset_text_field="text", max_seq_length=512, output_dir="/tmp", ) trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=sft_config, ) trainer.train() ``` Make sure to pass the correct value for `max_seq_length`, as the default value will be set to `min(tokenizer.model_max_length, 1024)`. You can also construct a model outside of the trainer and pass it as follows: ```python from transformers import AutoModelForCausalLM from datasets import load_dataset from trl import SFTConfig, SFTTrainer dataset = load_dataset("imdb", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") sft_config = SFTConfig(output_dir="/tmp") trainer = SFTTrainer( model, train_dataset=dataset, args=sft_config, ) trainer.train() ``` The above snippets will use the default training arguments from the [`SFTConfig`] class. If you want to modify the defaults, pass your modifications to the `SFTConfig` constructor and hand them to the trainer via the `args` argument. ## Advanced usage ### Train on completions only You can use the `DataCollatorForCompletionOnlyLM` to train your model on the generated completions only. Note that this works only in the case when `packing=False`. To instantiate that collator for instruction data, pass a response template and the tokenizer. Here is an example of how it would work to fine-tune `opt-350m` on completions only on the CodeAlpaca dataset: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from datasets import load_dataset from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM dataset = load_dataset("lucasmccabe-lmi/CodeAlpaca-20k", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") def formatting_prompts_func(example): output_texts = [] for i in range(len(example['instruction'])): text = f"### Question: {example['instruction'][i]}\n ### Answer: {example['output'][i]}" output_texts.append(text) return output_texts response_template = " ### Answer:" collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer) trainer = SFTTrainer( model, train_dataset=dataset, args=SFTConfig(output_dir="/tmp"), formatting_func=formatting_prompts_func, data_collator=collator, ) trainer.train() ``` To instantiate that collator for assistant-style conversation data, pass a response template, an instruction template and the tokenizer. 
Here is an example of how it would work to fine-tune `opt-350m` on assistant completions only on the Open Assistant Guanaco dataset: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from datasets import load_dataset from trl import SFTConfig, SFTTrainer, DataCollatorForCompletionOnlyLM dataset = load_dataset("timdettmers/openassistant-guanaco", split="train") model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") instruction_template = "### Human:" response_template = "### Assistant:" collator = DataCollatorForCompletionOnlyLM(instruction_template=instruction_template, response_template=response_template, tokenizer=tokenizer, mlm=False) trainer = SFTTrainer( model, args=SFTConfig(output_dir="/tmp"), train_dataset=dataset, data_collator=collator, ) trainer.train() ``` Make sure to have a `pad_token_id` that is different from the `eos_token_id`; otherwise the model may not properly predict EOS (End of Sentence) tokens during generation. #### Using token_ids directly for `response_template` Some tokenizers like Llama 2 (`meta-llama/Llama-2-XXb-hf`) tokenize sequences differently depending on whether they have context or not. For example: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf") def print_tokens_with_ids(txt): tokens = tokenizer.tokenize(txt, add_special_tokens=False) token_ids = tokenizer.encode(txt, add_special_tokens=False) print(list(zip(tokens, token_ids))) prompt = """### User: Hello\n\n### Assistant: Hi, how can I help you?""" print_tokens_with_ids(prompt) # [..., ('▁Hello', 15043), ('<0x0A>', 13), ('<0x0A>', 13), ('##', 2277), ('#', 29937), ('▁Ass', 4007), ('istant', 22137), (':', 29901), ...] response_template = "### Assistant:" print_tokens_with_ids(response_template) # [('▁###', 835), ('▁Ass', 4007), ('istant', 22137), (':', 29901)] ``` In this case, and due to the lack of context in `response_template`, the same string ("### Assistant:") is tokenized differently: - Text (with context): `[2277, 29937, 4007, 22137, 29901]` - `response_template` (without context): `[835, 4007, 22137, 29901]` This will lead to an error when the `DataCollatorForCompletionOnlyLM` does not find the `response_template` in the dataset example text: ``` RuntimeError: Could not find response key [835, 4007, 22137, 29901] in token IDs tensor([ 1, 835, ...]) ``` To solve this, you can tokenize the `response_template` with the same context as in the dataset, truncate it as needed and pass the `token_ids` directly to the `response_template` argument of the `DataCollatorForCompletionOnlyLM` class. For example: ```python response_template_with_context = "\n### Assistant:" # We added context here: "\n". This is enough for this tokenizer response_template_ids = tokenizer.encode(response_template_with_context, add_special_tokens=False)[2:] # Now we have it like in the dataset texts: `[2277, 29937, 4007, 22137, 29901]` data_collator = DataCollatorForCompletionOnlyLM(response_template_ids, tokenizer=tokenizer) ``` ### Add Special Tokens for Chat Format Adding special tokens to a language model is crucial for training chat models. These tokens are added between the different roles in a conversation, such as the user, assistant, and system, and help the model recognize the structure and flow of a conversation. This setup is essential for enabling the model to generate coherent and contextually appropriate responses in a chat environment. 
The [`setup_chat_format`] function in `trl` easily sets up a model and tokenizer for conversational AI tasks. This function: - Adds special tokens to the tokenizer, e.g. `<|im_start|>` and `<|im_end|>`, to indicate the start and end of a conversation. - Resizes the model’s embedding layer to accommodate the new tokens. - Sets the `chat_template` of the tokenizer, which is used to format the input data into a chat-like format. The default is `chatml` from OpenAI. - _Optionally_, you can pass `resize_to_multiple_of` to resize the embedding layer to a multiple of that value, e.g. 64. If you want to see more formats being supported in the future, please open a GitHub issue on [trl](https://github.com/huggingface/trl). ```python from transformers import AutoModelForCausalLM, AutoTokenizer from trl import setup_chat_format # Load model and tokenizer model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m") tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") # Set up the chat format with the default 'chatml' format model, tokenizer = setup_chat_format(model, tokenizer) ``` With our model and tokenizer set up, we can now fine-tune our model on a conversational dataset. Below is an example of how a dataset can be formatted for fine-tuning. ### Dataset format support The [`SFTTrainer`] supports popular dataset formats. This allows you to pass the dataset directly to the trainer without any pre-processing. The following formats are supported: * conversational format ```json {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "..."}]} {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "..."}]} {"messages": [{"role": "system", "content": "You are helpful"}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "..."}]} ``` * instruction format ```json {"prompt": "<prompt text>", "completion": "<ideal generated text>"} {"prompt": "<prompt text>", "completion": "<ideal generated text>"} {"prompt": "<prompt text>", "completion": "<ideal generated text>"} ``` If your dataset uses one of the above formats, you can directly pass it to the trainer without pre-processing. The [`SFTTrainer`] will then format the dataset for you using the defined format from the model's tokenizer with the [apply_chat_template](https://huggingface.co/docs/transformers/main/en/chat_templating#templates-for-chat-models) method. ```python from datasets import load_dataset from trl import SFTConfig, SFTTrainer ... # load jsonl dataset dataset = load_dataset("json", data_files="path/to/dataset.jsonl", split="train") # load dataset from the HuggingFace Hub dataset = load_dataset("philschmid/dolly-15k-oai-style", split="train") ... sft_config = SFTConfig(packing=True, output_dir="/tmp") trainer = SFTTrainer( "facebook/opt-350m", args=sft_config, train_dataset=dataset, ) ``` If the dataset is not in one of those formats, you can either preprocess the dataset to match the formatting or pass a formatting function to the SFTTrainer to do it for you. Let's have a look. ### Format your input prompts For instruction fine-tuning, it is quite common to have two columns inside the dataset: one for the prompt & the other for the response. This allows people to format examples like [Stanford-Alpaca](https://github.com/tatsu-lab/stanford_alpaca) did as follows: ```bash Below is an instruction ... 
### Instruction: {prompt} ### Response: {completion} ``` Let us assume your dataset has two fields, `question` and `answer`. Therefore you can just run: ```python ... def formatting_prompts_func(example): output_texts = [] for i in range(len(example['question'])): text = f"### Question: {example['question'][i]}\n ### Answer: {example['answer'][i]}" output_texts.append(text) return output_texts trainer = SFTTrainer( model, args=sft_config, train_dataset=dataset, formatting_func=formatting_prompts_func, ) trainer.train() ``` To properly format your input, make sure to process all the examples by looping over them and returning a list of processed texts. Check out a full example of how to use SFTTrainer on the Alpaca dataset [here](https://github.com/huggingface/trl/pull/444#issue-1760952763) ### Packing dataset ([`ConstantLengthDataset`]) [`SFTTrainer`] supports _example packing_, where multiple short examples are packed in the same input sequence to increase training efficiency. This is done with the [`ConstantLengthDataset`] utility class that returns constant-length chunks of tokens from a stream of examples. To enable the usage of this dataset class, simply pass `packing=True` to the [`SFTConfig`] constructor. ```python ... sft_config = SFTConfig(packing=True, dataset_text_field="text", output_dir="/tmp") trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=sft_config ) trainer.train() ``` Note that if you use a packed dataset and if you pass `max_steps` in the training arguments, you will probably train your models for more than a few epochs, depending on the way you have configured the packed dataset and the training protocol. Double check that you know and understand what you are doing. If you don't want to pack your `eval_dataset`, you can pass `eval_packing=False` to the [`SFTConfig`]. #### Customize your prompts using packed dataset If your dataset has several fields that you want to combine (for example, if the dataset has `question` and `answer` fields), you can pass a formatting function to the trainer that will take care of that. For example: ```python def formatting_func(example): text = f"### Question: {example['question']}\n ### Answer: {example['answer']}" return text sft_config = SFTConfig(packing=True, output_dir="/tmp") trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=sft_config, formatting_func=formatting_func ) trainer.train() ``` You can also customize the [`ConstantLengthDataset`] much more by directly passing the arguments to the [`SFTTrainer`] constructor. Please refer to that class' signature for more information. ### Control over the pretrained model You can directly pass the kwargs of the `from_pretrained()` method to the [`SFTTrainer`]. For example, if you want to load a model in a different precision, analogous to ```python model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.bfloat16) ... sft_config = SFTConfig( model_init_kwargs={ "torch_dtype": "bfloat16", }, output_dir="/tmp", ) trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=sft_config, ) trainer.train() ``` Note that all keyword arguments of `from_pretrained()` are supported. 
### Training adapters We also support tight integration with the 🤗 PEFT library, so that any user can conveniently train adapters and share them on the Hub instead of training the entire model. ```python from datasets import load_dataset from trl import SFTConfig, SFTTrainer from peft import LoraConfig dataset = load_dataset("imdb", split="train") peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( "EleutherAI/gpt-neo-125m", train_dataset=dataset, args=SFTConfig(output_dir="/tmp"), peft_config=peft_config ) trainer.train() ``` You can also continue training your `PeftModel`. For that, first load a `PeftModel` outside `SFTTrainer` and pass it directly to the trainer without the `peft_config` argument being passed. ### Training adapters with base 8-bit models For that, you need to first load your 8-bit model outside the trainer and pass a `PeftConfig` to the trainer. For example: ```python ... peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLM.from_pretrained( "EleutherAI/gpt-neo-125m", load_in_8bit=True, device_map="auto", ) trainer = SFTTrainer( model, train_dataset=dataset, args=SFTConfig(output_dir="/tmp"), peft_config=peft_config, ) trainer.train() ``` ## Using Flash Attention and Flash Attention 2 You can benefit from Flash Attention 1 & 2 using SFTTrainer out of the box with minimal code changes. First, to make sure you have all the latest features from transformers, install transformers from source ```bash pip install -U git+https://github.com/huggingface/transformers.git ``` Note that Flash Attention currently only works on GPU and in a half-precision regime (when using adapters, the base model should be loaded in half-precision). Note also that both features are perfectly compatible with other tools such as quantization. ### Using Flash Attention 1 For Flash Attention 1 you can use the `BetterTransformer` API and force-dispatch the API to use the Flash Attention kernel. First, install the latest optimum package: ```bash pip install -U optimum ``` Once you have loaded your model, wrap the `trainer.train()` call under the `with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):` context manager: ```diff ... + with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): trainer.train() ``` Note that you cannot train your model using Flash Attention 1 on an arbitrary dataset, as `torch.nn.functional.scaled_dot_product_attention` does not support training with padding tokens if you use Flash Attention kernels. Therefore you can only use that feature with `packing=True`. If your dataset contains padding tokens, consider switching to the Flash Attention 2 integration. Below are some numbers you can get in terms of speedup and memory efficiency, using Flash Attention 1, on a single NVIDIA T4 16GB. 
| use_flash_attn_1 | model_name | max_seq_len | batch_size | time per training step | | ---------------- | ----------------- | ----------- | ---------- | ---------------------- | | x | facebook/opt-350m | 2048 | 8 | ~59.1s | | | facebook/opt-350m | 2048 | 8 | **OOM** | | x | facebook/opt-350m | 2048 | 4 | ~30.3s | | | facebook/opt-350m | 2048 | 4 | ~148.9s | ### Using Flash Attention 2 To use Flash Attention 2, first install the latest `flash-attn` package: ```bash pip install -U flash-attn ``` And add `attn_implementation="flash_attention_2"` when calling `from_pretrained`: ```python model = AutoModelForCausalLM.from_pretrained( model_id, load_in_4bit=True, attn_implementation="flash_attention_2" ) ``` If you don't use quantization, make sure your model is loaded in half-precision and dispatched on a supported GPU device. After loading your model, you can either train it as is, or attach adapters and train adapters on it in case your model is quantized. In contrast to Flash Attention 1, the integration makes it possible to train your model on an arbitrary dataset that also includes padding tokens. ### Using model creation utility We included a utility function to create your model. [[autodoc]] ModelConfig ```python import torch from transformers import AutoModelForCausalLM from trl import ModelConfig, SFTTrainer, get_kbit_device_map, get_peft_config, get_quantization_config model_config = ModelConfig( model_name_or_path="facebook/opt-350m", attn_implementation=None, # or "flash_attention_2" ) torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, trust_remote_code=model_config.trust_remote_code, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs) trainer = SFTTrainer( ..., model=model, peft_config=get_peft_config(model_config), ) ``` ### Enhance the model's performances using NEFTune NEFTune is a technique to boost the performance of chat models and was introduced by the paper ["NEFTune: Noisy Embeddings Improve Instruction Finetuning"](https://arxiv.org/abs/2310.05914) from Jain et al. It consists of adding noise to the embedding vectors during training. According to the abstract of the paper: > Standard finetuning of LLaMA-2-7B using Alpaca achieves 29.79% on AlpacaEval, which rises to 64.69% using noisy embeddings. NEFTune also improves over strong baselines on modern instruction datasets. Models trained with Evol-Instruct see a 10% improvement, with ShareGPT an 8% improvement, and with OpenPlatypus an 8% improvement. Even powerful models further refined with RLHF such as LLaMA-2-Chat benefit from additional training with NEFTune. <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/neft-screenshot.png"> </div> To use it in `SFTTrainer`, simply pass `neftune_noise_alpha` when creating your `SFTConfig` instance. Note that, to avoid any surprising behaviour, NEFTune is disabled after training to restore the original behaviour of the embedding layer. 
```python from datasets import load_dataset from trl import SFTConfig, SFTTrainer dataset = load_dataset("imdb", split="train") sft_config = SFTConfig( neftune_noise_alpha=5, output_dir="/tmp", ) trainer = SFTTrainer( "facebook/opt-350m", train_dataset=dataset, args=sft_config, ) trainer.train() ``` We have tested NEFTune by training `mistralai/Mistral-7B-v0.1` on the [OpenAssistant dataset](https://huggingface.co/datasets/timdettmers/openassistant-guanaco) and validated that using NEFTune led to a performance boost of ~25% on MT Bench. <div style="text-align: center"> <img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl-neftune-mistral-7b.png"> </div> Note, however, that the amount of performance gain is _dataset dependent_; in particular, applying NEFTune on synthetic datasets like [UltraChat](https://huggingface.co/datasets/stingning/ultrachat) typically produces smaller gains. ### Accelerate fine-tuning 2x using `unsloth` You can further accelerate QLoRA / LoRA (2x faster, 60% less memory) using the [`unsloth`](https://github.com/unslothai/unsloth) library that is fully compatible with `SFTTrainer`. Currently `unsloth` supports only Llama-style (Yi, TinyLlama, Qwen, Deepseek, etc.) and Mistral architectures. Some benchmarks on a single A100 40GB are listed below: | 1 A100 40GB | Dataset | 🤗 | 🤗 + Flash Attention 2 | 🦥 Unsloth | 🦥 VRAM saved | |-----------------|-----------|-----|-------------------------|-----------------|----------------| | Code Llama 34b | Slim Orca | 1x | 1.01x | **1.94x** | -22.7% | | Llama-2 7b | Slim Orca | 1x | 0.96x | **1.87x** | -39.3% | | Mistral 7b | Slim Orca | 1x | 1.17x | **1.88x** | -65.9% | | Tiny Llama 1.1b | Alpaca | 1x | 1.55x | **2.74x** | -57.8% | First install `unsloth` according to the [official documentation](https://github.com/unslothai/unsloth). Once installed, you can incorporate unsloth into your workflow in a very simple manner; instead of loading `AutoModelForCausalLM`, you just need to load a `FastLanguageModel` as follows: ```python import torch from trl import SFTConfig, SFTTrainer from unsloth import FastLanguageModel max_seq_length = 2048 # Supports automatic RoPE Scaling, so choose any number # Load model model, tokenizer = FastLanguageModel.from_pretrained( model_name="unsloth/mistral-7b", max_seq_length=max_seq_length, dtype=None, # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ load_in_4bit=True, # Use 4bit quantization to reduce memory usage. Can be False # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf ) # Do model patching and add fast LoRA weights model = FastLanguageModel.get_peft_model( model, r=16, target_modules=[ "q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj", ], lora_alpha=16, lora_dropout=0, # Dropout = 0 is currently optimized bias="none", # Bias = "none" is currently optimized use_gradient_checkpointing=True, random_state=3407, ) args = SFTConfig( output_dir="./output", max_seq_length=max_seq_length, dataset_text_field="text", ) trainer = SFTTrainer( model=model, args=args, train_dataset=dataset, ) trainer.train() ``` The saved model is fully compatible with Hugging Face's transformers library. Learn more about unsloth in their [official repository](https://github.com/unslothai/unsloth). ## Best practices Pay attention to the following best practices when training a model with that trainer: - By default, [`SFTTrainer`] pads the sequences to the `max_seq_length` argument of the [`SFTTrainer`]. 
If none is passed, the trainer will retrieve that value from the tokenizer. Some tokenizers do not provide a default value, so there is a check to retrieve the minimum between 2048 and that value. Make sure to check it before training. - For training adapters in 8bit, you might need to tweak the arguments of the `prepare_model_for_kbit_training` method from PEFT, hence we advise users to use the `prepare_in_int8_kwargs` field, or create the `PeftModel` outside the [`SFTTrainer`] and pass it. - For more memory-efficient training using adapters, you can load the base model in 8bit; for that, simply add the `load_in_8bit` argument when creating the [`SFTTrainer`], or create a base model in 8bit outside the trainer and pass it. - If you create a model outside the trainer, make sure not to pass to the trainer any additional keyword arguments that are relative to the `from_pretrained()` method. ## Multi-GPU Training Trainer (and thus SFTTrainer) supports multi-GPU training. If you run your script with `python script.py` it will default to using DP as the strategy, which may be [slower than expected](https://github.com/huggingface/trl/issues/1303). To use DDP (which is generally recommended, see [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many?select-gpu=Accelerate#data-parallelism) for more info) you must launch the script with `python -m torch.distributed.launch script.py` or `accelerate launch script.py`. For DDP to work you must also check the following: - If you're using gradient_checkpointing, add the following to the TrainingArguments: `gradient_checkpointing_kwargs={'use_reentrant':False}` (more info [here](https://github.com/huggingface/transformers/issues/26969)) - Ensure that the model is placed on the correct device: ```python from accelerate import PartialState device_string = PartialState().process_index model = AutoModelForCausalLM.from_pretrained( ... device_map={'':device_string} ) ``` ## GPTQ Conversion You may experience some issues with GPTQ Quantization after completing training. Lowering `gradient_accumulation_steps` to `4` will resolve most issues during the quantization process to GPTQ format. ## SFTTrainer [[autodoc]] SFTTrainer ## SFTConfig [[autodoc]] SFTConfig ## Datasets In the SFTTrainer we smartly support `datasets.IterableDataset` in addition to other style datasets. This is useful if you are using large corpora that you do not want to save entirely to disk. The data will be tokenized and processed on the fly, even when packing is enabled. Additionally, in the SFTTrainer, we support pre-tokenized datasets if they are `datasets.Dataset` or `datasets.IterableDataset`. In other words, if such a dataset has a column of `input_ids`, no further processing (tokenization or packing) will be done, and the dataset will be used as-is. This can be useful if you have pretokenized your dataset outside of this script and want to re-use it directly. ### ConstantLengthDataset [[autodoc]] trainer.ConstantLengthDataset
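For reference, here is a minimal sketch of constructing a [`ConstantLengthDataset`] directly, mirroring how it is used in TRL's own test suite; the model name, text field, and lengths below are illustrative choices rather than prescribed values:

```python
from datasets import load_dataset
from transformers import AutoTokenizer
from trl.trainer import ConstantLengthDataset

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
raw_dataset = load_dataset("imdb", split="train")

# Streams examples from `raw_dataset`, tokenizes the `text` field, and yields
# fixed-length chunks of `seq_length` tokens suitable for packed training.
packed_dataset = ConstantLengthDataset(
    tokenizer,
    raw_dataset,
    dataset_text_field="text",   # alternatively, pass formatting_func=...
    seq_length=512,              # length of each packed chunk
    num_of_sequences=16,         # number of sequences buffered before chunking
)

# Each yielded example contains `input_ids` and `labels` of length `seq_length`.
example = next(iter(packed_dataset))
print(len(example["input_ids"]), len(example["labels"]))
```

The resulting object can be passed as `train_dataset` to the [`SFTTrainer`]; this is essentially what `packing=True` sets up for you under the hood.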
trl/docs/source/sft_trainer.mdx/0
{ "file_path": "trl/docs/source/sft_trainer.mdx", "repo_id": "trl", "token_count": 9044 }
652
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from peft import LoraConfig from transformers import AutoTokenizer, HfArgumentParser, load_tool from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, TextEnvironment os.environ["HF_ALLOW_CODE_EVAL"] = "1" os.environ["TOKENIZERS_PARALLELISM"] = "false" @dataclass class ScriptArguments: model_name: Optional[str] = field(default="bigcode/starcoderbase", metadata={"help": "the model name"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) learning_rate: Optional[float] = field(default=1e-5, metadata={"help": "the learning rate"}) mini_batch_size: Optional[int] = field(default=1, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=32, metadata={"help": "the batch size"}) gradient_accumulation_steps: Optional[int] = field( default=16, metadata={"help": "the number of gradient accumulation steps"} ) max_new_tokens: Optional[int] = field(default=256, metadata={"help": "max number of generated tokens per turn"}) ppo_epochs: Optional[int] = field(default=1, metadata={"help": "max number of ppo epochs"}) iterations: Optional[int] = field(default=1000, metadata={"help": "the number of iterations"}) seed: Optional[int] = field(default=0, metadata={"help": "the random seed"}) parser = HfArgumentParser(ScriptArguments) args = parser.parse_args_into_dataclasses()[0] lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", target_modules=["c_proj", "c_attn", "q_attn"], ) # set up models model = AutoModelForCausalLMWithValueHead.from_pretrained( args.model_name, use_auth_token=True, trust_remote_code=True, load_in_4bit=True, peft_config=lora_config, ) tokenizer = AutoTokenizer.from_pretrained(args.model_name, use_auth_token=True) tokenizer.pad_token = tokenizer.eos_token # system prompt prompt = """\ Answer the following question: Q: In which branch of the arts is Patricia Neary famous? A: Ballets A2: <request><Wiki>Patricia Neary<call>Patricia Neary (born October 27, 1942) is an American ballerina, choreographer and ballet director, who has been particularly active in Switzerland. She has also been a highly successful ambassador for the Balanchine Trust, bringing George Balanchine's ballets to 60 cities around the globe.<response> Result=Ballets<submit> Q: Who won Super Bowl XX? A: Chicago Bears A2: <request><Wiki>Super Bowl XX<call>Super Bowl XX was an American football game between the National Football Conference (NFC) champion Chicago Bears and the American Football Conference (AFC) champion New England Patriots to decide the National Football League (NFL) champion for the 1985 season. 
The Bears defeated the Patriots by the score of 46–10, capturing their first NFL championship (and Chicago's first overall sports victory) since 1963, three years prior to the birth of the Super Bowl. Super Bowl XX was played on January 26, 1986 at the Louisiana Superdome in New Orleans.<response> Result=Chicago Bears<submit> Q: """ generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, "eos_token_id": -1, "max_new_tokens": args.max_new_tokens, } # trainer config = PPOConfig( batch_size=args.batch_size, model_name=args.model_name, learning_rate=args.learning_rate, log_with=args.log_with, mini_batch_size=args.mini_batch_size, ppo_epochs=args.ppo_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, seed=args.seed, optimize_cuda_cache=True, ) ppo_trainer = PPOTrainer(config=config, model=model, tokenizer=tokenizer) dataset = load_dataset("trivia_qa", "rc", split="train") local_seed = args.seed + ppo_trainer.accelerator.process_index * 100003 # Prime dataset = dataset.shuffle(local_seed) def data_generator(): for i in range(len(dataset)): yield dataset[i]["question"], list(dataset[i]["answer"]["normalized_aliases"]) gen = data_generator() gen = iter(gen) def generate_data(n): tasks, answers = [], [] for _i in range(n): q, a = next(gen) tasks.append(q) answers.append(a) return tasks, answers def exact_match_reward(responses, answers=None): """Reward if generated response contains correct answer.""" rewards = [] for response, answer in zip(responses, answers): reward = 0.0 for a in answer: if a.lower() in response.lower(): reward += 1.0 break rewards.append(torch.tensor(reward)) return rewards def tool_fn(x): # limit the amount of tokens return tool(x).split("\n")[1][:600] # text env tool = load_tool("vwxyzjn/pyserini-wikipedia-kilt-doc") text_env = TextEnvironment( model, tokenizer, {"Wiki": tool_fn}, exact_match_reward, prompt, generation_kwargs=generation_kwargs, max_tool_reponse=400, ) def print_trainable_parameters(model): trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) print_trainable_parameters(model) # main training loop for i in range(args.iterations): tasks, answers = generate_data(config.batch_size) queries, responses, masks, rewards, histories = text_env.run(tasks, answers=answers) train_stats = ppo_trainer.step(queries, responses, rewards, masks) response_texts = [tokenizer.decode(response) for response in responses] query_texts = [tokenizer.decode(query) for query in queries] texts = { "query": [qt.split("<submit>")[-1].strip() for qt in query_texts], "response": response_texts, "answer": [", ".join(item) for item in answers], } all_rewards = ppo_trainer.accelerator.gather(torch.tensor(rewards, device=ppo_trainer.accelerator.device)) ppo_trainer.log_stats(train_stats, texts, list(all_rewards), columns_to_log=["query", "response", "answer"]) if i % 100 == 0: ppo_trainer.save_pretrained(f"models/{args.model_name}_{args.seed}_{i}_triviaqa")
trl/examples/research_projects/tools/triviaqa.py/0
{ "file_path": "trl/examples/research_projects/tools/triviaqa.py", "repo_id": "trl", "token_count": 2555 }
653
# flake8: noqa # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # regular: python examples/scripts/vsft_llava.py \ --dataset_name="HuggingFaceH4/llava-instruct-mix-vsft" \ --model_name_or_path="llava-hf/llava-1.5-7b-hf" \ --report_to="wandb" \ --learning_rate=1.4e-5 \ --per_device_train_batch_size=8 \ --gradient_accumulation_steps=1 \ --output_dir="data/vsft-llava-1.5-7b-hf" \ --logging_steps=5 \ --num_train_epochs=1 \ --push_to_hub \ --gradient_checkpointing \ --remove_unused_columns=False \ --torch_dtype=float16 \ --fp16=True # peft: python examples/scripts/vsft_llava.py \ --dataset_name="HuggingFaceH4/llava-instruct-mix-vsft" \ --model_name_or_path="llava-hf/llava-1.5-7b-hf" \ --report_to="wandb" \ --learning_rate=1.4e-5 \ --per_device_train_batch_size=8 \ --gradient_accumulation_steps=1 \ --output_dir="data/vsft-llava-1.5-7b-hf" \ --logging_steps=5 \ --num_train_epochs=1 \ --push_to_hub \ --gradient_checkpointing \ --remove_unused_columns=False \ --torch_dtype=float16 \ --fp16=True \ --use_peft=True \ --lora_r=64 \ --lora_alpha=16 \ --lora_target_modules=all-linear" # evaluation: To evaluate, first install the lmms-eval framework: pip install git+https://github.com/EvolvingLMMs-Lab/lmms-eval.git then run: accelerate launch --num_processes=8 -m lmms_eval \ --model llava_hf \ --model_args pretrained=llava-hf/llava-1.5-7b-hf \ --tasks mmbench \ --batch_size 1 \ --output_path ./logs/ \ --log_sample """ import logging import os from contextlib import nullcontext TRL_USE_RICH = os.environ.get("TRL_USE_RICH", False) from trl.commands.cli_utils import init_zero_verbose, SFTScriptArguments, TrlParser if TRL_USE_RICH: init_zero_verbose() FORMAT = "%(message)s" from rich.console import Console from rich.logging import RichHandler import torch from accelerate import Accelerator from datasets import load_dataset from tqdm.rich import tqdm from transformers import AutoTokenizer, AutoProcessor, LlavaForConditionalGeneration from trl import ( ModelConfig, RichProgressCallback, SFTConfig, SFTTrainer, get_peft_config, get_quantization_config, get_kbit_device_map, ) tqdm.pandas() if TRL_USE_RICH: logging.basicConfig(format=FORMAT, datefmt="[%X]", handlers=[RichHandler()], level=logging.INFO) if __name__ == "__main__": parser = TrlParser((SFTScriptArguments, SFTConfig, ModelConfig)) sft_script_args, training_args, model_config = parser.parse_args_and_config() training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) # Force use our print callback if TRL_USE_RICH: training_args.disable_tqdm = True console = Console() ################ # Model, Tokenizer & Processor ################ LLAVA_CHAT_TEMPLATE = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
{% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, trust_remote_code=model_config.trust_remote_code, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path, use_fast=True) tokenizer.chat_template = LLAVA_CHAT_TEMPLATE processor = AutoProcessor.from_pretrained(model_config.model_name_or_path) processor.tokenizer = tokenizer model = LlavaForConditionalGeneration.from_pretrained(model_config.model_name_or_path, **model_kwargs) ################ # Create a data collator to encode text and image pairs ################ class LLavaDataCollator: def __init__(self, processor): self.processor = processor def __call__(self, examples): texts = [] images = [] for example in examples: if len(example["images"]) > 1: raise ValueError("This collator only supports one image per example") messages = example["messages"] text = self.processor.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=False ) texts.append(text) images.append(example["images"][0]) batch = self.processor(texts, images, return_tensors="pt", padding=True) labels = batch["input_ids"].clone() if self.processor.tokenizer.pad_token_id is not None: labels[labels == self.processor.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch data_collator = LLavaDataCollator(processor) ################ # Dataset ################ raw_datasets = load_dataset(sft_script_args.dataset_name) train_dataset = raw_datasets[sft_script_args.dataset_train_split] eval_dataset = raw_datasets[sft_script_args.dataset_test_split] ################ # Optional rich context managers ############### init_context = nullcontext() if not TRL_USE_RICH else console.status("[bold green]Initializing the SFTTrainer...") save_context = ( nullcontext() if not TRL_USE_RICH else console.status(f"[bold green]Training completed! Saving the model to {training_args.output_dir}") ) ################ # Training ################ with init_context: trainer = SFTTrainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, dataset_text_field="text", # need a dummy field tokenizer=tokenizer, peft_config=get_peft_config(model_config), callbacks=[RichProgressCallback] if TRL_USE_RICH else None, data_collator=data_collator, dataset_kwargs={"skip_prepare_dataset": True}, ) trainer.train() with save_context: trainer.save_model(training_args.output_dir) trainer.push_to_hub() if Accelerator().is_main_process: processor.push_to_hub(training_args.hub_model_id)
trl/examples/scripts/vsft_llava.py/0
{ "file_path": "trl/examples/scripts/vsft_llava.py", "repo_id": "trl", "token_count": 3193 }
654
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from datasets import Dataset from parameterized import parameterized from pytest import mark from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer from trl import CPOConfig, CPOTrainer from .testing_utils import require_peft class CPOTrainerTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id) cls.tokenizer.pad_token = cls.tokenizer.eos_token # get t5 as seq2seq example: model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration-correct-vocab" cls.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id) cls.t5_tokenizer = AutoTokenizer.from_pretrained(model_id) def _init_dummy_dataset(self): # fmt: off dummy_dataset_dict = { "prompt": [ "hello", "how are you", "What is your name?", "What is your name?", "Which is the best programming language?", "Which is the best programming language?", "Which is the best programming language?", "[INST] How is the stock price? [/INST]", "[INST] How is the stock price? 
[/INST] ", ], "chosen": [ "hi nice to meet you", "I am fine", "My name is Mary", "My name is Mary", "Python", "Python", "Python", "$46 as of 10am EST", "46 as of 10am EST", ], "rejected": [ "leave me alone", "I am not fine", "Whats it to you?", "I dont have a name", "Javascript", "C++", "Java", " $46 as of 10am EST", " 46 as of 10am EST", ], } # fmt: on return Dataset.from_dict(dummy_dataset_dict) @parameterized.expand( [ ["gpt2", "sigmoid"], ["t5", "hinge"], ["gpt2", "ipo"], ["t5", "ipo"], ] ) def test_cpo_trainer(self, name, loss_type): with tempfile.TemporaryDirectory() as tmp_dir: training_args = CPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=1, learning_rate=9e-1, evaluation_strategy="steps", beta=0.1, loss_type=loss_type, ) dummy_dataset = self._init_dummy_dataset() if name == "gpt2": model = self.model tokenizer = self.tokenizer elif name == "t5": model = self.t5_model tokenizer = self.t5_tokenizer training_args.is_encoder_decoder = True trainer = CPOTrainer( model=model, args=training_args, tokenizer=tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() assert trainer.state.log_history[-1]["train_loss"] is not None # check the params have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) # check the params have changed - ignore 0 biases if param.sum() != 0: assert not torch.equal(param, new_param) @require_peft @mark.peft_test def test_cpo_trainer_with_lora(self): from peft import LoraConfig lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) with tempfile.TemporaryDirectory() as tmp_dir: training_args = CPOConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, evaluation_strategy="steps", beta=0.1, ) dummy_dataset = self._init_dummy_dataset() trainer = CPOTrainer( model=self.model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, peft_config=lora_config, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() assert trainer.state.log_history[-1]["train_loss"] is not None # check the params have changed for n, param in previous_trainable_params.items(): if "lora" in n: new_param = trainer.model.get_parameter(n) # check the params have changed - ignore 0 biases if param.sum() != 0: assert not torch.equal(param, new_param)
trl/tests/test_cpo_trainer.py/0
{ "file_path": "trl/tests/test_cpo_trainer.py", "repo_id": "trl", "token_count": 3234 }
655
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import numpy as np import pytest import torch from datasets import Dataset, Image, Sequence from transformers import ( AutoModelForCausalLM, AutoProcessor, AutoTokenizer, LlavaForConditionalGeneration, ) from trl import SFTConfig, SFTTrainer from trl.import_utils import is_peft_available, is_pil_available from trl.trainer import ConstantLengthDataset, DataCollatorForCompletionOnlyLM from .testing_utils import require_peft, requires_pil def formatting_prompts_func(example): text = f"### Question: {example['question']}\n ### Answer: {example['answer']}" return text def formatting_prompts_func_batched(example): output_text = [] for i, question in enumerate(example["question"]): text = f"### Question: {question}\n ### Answer: {example['answer'][i]}" output_text.append(text) return output_text if is_peft_available(): from peft import LoraConfig, PeftModel if is_pil_available(): from PIL import Image as PILImage class SFTTrainerTester(unittest.TestCase): r""" """ @classmethod def setUpClass(cls): cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" cls.model = AutoModelForCausalLM.from_pretrained(cls.model_id) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id) cls.tokenizer.pad_token = cls.tokenizer.eos_token cls.dummy_dataset = Dataset.from_dict( { "question": [ "Does llamas know how to code?", "Does llamas know how to fly?", "Does llamas know how to talk?", "Does llamas know how to code?", "Does llamas know how to fly?", "Does llamas know how to talk?", "Does llamas know how to swim?", ], "answer": [ "Yes, llamas are very good at coding.", "No, llamas can't fly.", "Yes, llamas are very good at talking.", "Yes, llamas are very good at coding.", "No, llamas can't fly.", "Yes, llamas are very good at talking.", "No, llamas can't swim.", ], "text": [ "### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.", "### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.", "### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.", "### Question: Does llamas know how to code?\n ### Answer: Yes, llamas are very good at coding.", "### Question: Does llamas know how to fly?\n ### Answer: No, llamas can't fly.", "### Question: Does llamas know how to talk?\n ### Answer: Yes, llamas are very good at talking.", "### Question: Does llamas know how to swim?\n ### Answer: No, llamas can't swim.", ], } ) cls.dummy_chatml_dataset = Dataset.from_dict( { "messages": [ [ {"role": "system", "content": "You are helpful"}, {"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, {"role": "user", "content": "What is 2+2?"}, {"role": "assistant", "content": "4"}, {"role": "user", "content": "What is 3+3?"}, {"role": "assistant", "content": "6"}, ], [ {"role": "system", "content": "You are helpful"}, {"role": "user", 
"content": "Hello"}, {"role": "assistant", "content": "Hi, how can I help you?"}, ], ] } ) cls.dummy_instruction_dataset = Dataset.from_list( [ {"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, {"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, {"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, {"prompt": "What is 2+2?", "completion": "4"}, {"prompt": "What is 3+3?", "completion": "6"}, {"prompt": "What is 4+4?", "completion": "8"}, ] ) if is_pil_available(): cls.dummy_vsft_instruction_dataset = Dataset.from_dict( { "messages": [ [ { "role": "user", "content": [{"type": "text", "text": "What is in this image?"}, {"type": "image"}], }, { "role": "assistant", "content": [{"type": "text", "text": "It is random noise."}], }, { "role": "user", "content": [{"type": "text", "text": "Oh ye, you are right, what is 1+1"}], }, { "role": "assistant", "content": [{"type": "text", "text": "2"}], }, ], [ { "role": "user", "content": [{"type": "text", "text": "What is in this image?"}, {"type": "image"}], }, { "role": "assistant", "content": [{"type": "text", "text": "It is random noise."}], }, ], ], "images": [ [PILImage.fromarray((np.random.rand(40, 50, 3) * 255).astype("uint8")).convert("RGBA")], [PILImage.fromarray((np.random.rand(50, 60, 3) * 255).astype("uint8")).convert("RGBA")], ], } ) cls.dummy_vsft_instruction_dataset = cls.dummy_vsft_instruction_dataset.cast_column( "images", Sequence(Image()) ) cls.train_dataset = ConstantLengthDataset( cls.tokenizer, cls.dummy_dataset, dataset_text_field=None, formatting_func=formatting_prompts_func, seq_length=16, num_of_sequences=16, ) cls.eval_dataset = ConstantLengthDataset( cls.tokenizer, cls.dummy_dataset, dataset_text_field=None, formatting_func=formatting_prompts_func, seq_length=16, num_of_sequences=16, ) def test_constant_length_dataset(self): formatted_dataset = ConstantLengthDataset( self.tokenizer, self.dummy_dataset, dataset_text_field=None, formatting_func=formatting_prompts_func, ) assert len(formatted_dataset) == len(self.dummy_dataset) assert len(formatted_dataset) > 0 for example in formatted_dataset: assert "input_ids" in example assert "labels" in example assert len(example["input_ids"]) == formatted_dataset.seq_length assert len(example["labels"]) == formatted_dataset.seq_length decoded_text = self.tokenizer.decode(example["input_ids"]) assert ("Question" in decoded_text) and ("Answer" in decoded_text) def test_sft_trainer(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, packing=True, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") def test_sft_trainer_uncorrect_data(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=True, 
) with pytest.raises(ValueError): _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, ) # this should work since the dummy chatml include the correct format training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, max_seq_length=32, # make sure there is at least 1 packed sequence num_of_sequences=32, packing=True, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_chatml_dataset, ) training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=False, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_chatml_dataset, ) # this should work since the dummy instruction dataset is the correct format training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, max_seq_length=16, # make sure there is at least 1 packed sequence packing=True, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_instruction_dataset, ) training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=False, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_instruction_dataset, ) training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, max_seq_length=32, # make sure there is at least 1 packed sequence packing=True, ) # This should work _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, ) with pytest.raises(ValueError): # This should not work because not enough data for one sample training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, max_seq_length=1024, # make sure there is NOT at least 1 packed sequence packing=True, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, ) # This should not work as well with pytest.raises(ValueError): training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=False, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, ) # but this should work training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=False, ) _ = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func_batched, ) def test_sft_trainer_with_model_num_train_epochs(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, num_train_epochs=2, per_device_train_batch_size=2, packing=True, ) trainer = 
SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, num_train_epochs=2, per_device_train_batch_size=2, dataset_text_field="text", max_seq_length=16, num_of_sequences=16, packing=True, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, num_train_epochs=2, per_device_train_batch_size=2, dataset_text_field="text", max_seq_length=16, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1") def test_sft_trainer_with_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=True, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, dataset_text_field="text", max_seq_length=16, num_of_sequences=16, packing=True, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") # with formatting_func + packed with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, max_seq_length=16, num_of_sequences=16, packing=True, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") # with formatting_func + packed with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, max_seq_length=16, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, formatting_func=formatting_prompts_func_batched, ) 
trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, save_steps=1, per_device_train_batch_size=2, dataset_text_field="text", max_seq_length=16, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.dummy_dataset, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1") def test_sft_trainer_with_multiple_eval_datasets(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=1, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=True, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset={ "data1": self.eval_dataset, "data2": self.eval_dataset, }, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_data1_loss"] is not None assert trainer.state.log_history[1]["eval_data2_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-1") def test_data_collator_completion_lm(self): response_template = "### Response:\n" data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=self.tokenizer, mlm=False) text = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly.""" encoded_text = self.tokenizer(text) examples = [encoded_text] batch = data_collator(examples) labels = batch["labels"] last_pad_idx = np.where(labels == -100)[1][-1] result_text = self.tokenizer.decode(batch["input_ids"][0, last_pad_idx + 1 :]) assert result_text == "I have not been masked correctly." def test_data_collator_completion_lm_with_multiple_text(self): tokenizer = copy.deepcopy(self.tokenizer) tokenizer.padding_side = "left" response_template = "### Response:\n" data_collator = DataCollatorForCompletionOnlyLM(response_template, tokenizer=tokenizer, mlm=False) text1 = """\n\n### Instructions:\nHello all this should be masked\n\n### Response:\nI have not been masked correctly.""" text2 = """\n\n### Instructions:\nThis is another longer text that should also be masked. This text is significantly longer than the previous one.\n\n### Response:\nI have not been masked correctly.""" encoded_text1 = tokenizer(text1) encoded_text2 = tokenizer(text2) examples = [encoded_text1, encoded_text2] batch = data_collator(examples) for i in range(2): labels = batch["labels"][i] last_pad_idx = np.where(labels == -100)[0][-1] result_text = tokenizer.decode(batch["input_ids"][i, last_pad_idx + 1 :]) assert result_text == "I have not been masked correctly." 
def test_data_collator_chat_completion_lm(self): instruction_template = "### Human:" assistant_template = "### Assistant:" data_collator = DataCollatorForCompletionOnlyLM( response_template=assistant_template, instruction_template=instruction_template, tokenizer=self.tokenizer, mlm=False, ) text = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too.""" encoded_text = self.tokenizer(text) examples = [encoded_text] batch = data_collator(examples) labels = batch["labels"] non_masked_tokens = batch["input_ids"][labels != -100] result_text = self.tokenizer.decode(non_masked_tokens) assert result_text == " I should not be masked. I should not be masked too." def test_data_collator_chat_completion_lm_with_multiple_text(self): tokenizer = copy.deepcopy(self.tokenizer) tokenizer.padding_side = "left" instruction_template = "### Human:" assistant_template = "### Assistant:" data_collator = DataCollatorForCompletionOnlyLM( response_template=assistant_template, instruction_template=instruction_template, tokenizer=tokenizer, mlm=False, ) text1 = """### Human: Hello all this should be masked.### Assistant: I should not be masked.""" text2 = """### Human: Hello all this should be masked.### Assistant: I should not be masked.### Human: All this should be masked too.### Assistant: I should not be masked too.""" encoded_text1 = tokenizer(text1) encoded_text2 = tokenizer(text2) examples = [encoded_text1, encoded_text2] batch = data_collator(examples) labels = batch["labels"] input_ids = batch["input_ids"] non_masked_tokens1 = input_ids[0][labels[0] != -100] result_text1 = tokenizer.decode(non_masked_tokens1) assert result_text1 == " I should not be masked." non_masked_tokens2 = input_ids[1][labels[1] != -100] result_text2 = tokenizer.decode(non_masked_tokens2) assert result_text2 == " I should not be masked. I should not be masked too." 
def test_sft_trainer_infinite_with_model(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=5, eval_steps=1, save_steps=1, per_device_train_batch_size=2, packing=True, max_seq_length=500, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) assert trainer.train_dataset.infinite trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None # make sure the trainer did 5 steps assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-5") def test_sft_trainer_infinite_with_model_epochs(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, num_train_epochs=1, per_device_train_batch_size=2, save_strategy="epoch", packing=True, max_seq_length=500, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) assert not trainer.train_dataset.infinite trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None # make sure the trainer did 5 steps assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-4") def test_sft_trainer_with_model_neftune(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=2, eval_steps=1, save_steps=1, per_device_train_batch_size=2, neftune_noise_alpha=5, packing=True, ) trainer = SFTTrainer( model=self.model, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.model = trainer._trl_activate_neftune(trainer.model) device = trainer.model.get_input_embeddings().weight.device trainer.model.train() torch.random.manual_seed(42) embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) torch.random.manual_seed(24) embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) assert not torch.allclose(embeds_neftune, embeds_neftune_2) assert len(trainer.model.get_input_embeddings()._forward_hooks) > 0 trainer.neftune_hook_handle.remove() trainer.train() # Make sure forward pass works fine _ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device)) assert len(trainer.model.get_input_embeddings()._forward_hooks) == 0 @require_peft def test_peft_sft_trainer_str(self): with tempfile.TemporaryDirectory() as tmp_dir: peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) training_args = SFTConfig(packing=True, output_dir=tmp_dir) _ = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, ) @require_peft def test_peft_sft_trainer(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, packing=True, ) peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() 
assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") assert "adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2") assert "model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2") @require_peft def test_peft_sft_trainer_gc(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, packing=True, ) peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") assert "adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2") assert "model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2") @require_peft def test_peft_sft_trainer_neftune(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, neftune_noise_alpha=5, packing=True, ) peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, ) trainer.model = trainer._trl_activate_neftune(trainer.model) assert isinstance(trainer.model, PeftModel) device = trainer.model.get_input_embeddings().weight.device trainer.model.train() torch.random.manual_seed(42) embeds_neftune = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) torch.random.manual_seed(24) embeds_neftune_2 = trainer.model.get_input_embeddings()(torch.LongTensor([[1, 0, 1]]).to(device)) assert not torch.allclose(embeds_neftune, embeds_neftune_2) assert len(trainer.model.get_input_embeddings()._forward_hooks) > 0 trainer.neftune_hook_handle.remove() trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "adapter_model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2") assert "adapter_config.json" in os.listdir(tmp_dir + "/checkpoint-2") assert "model.safetensors" not in os.listdir(tmp_dir + "/checkpoint-2") # Make sure forward pass works fine to check if embeddings forward is not broken. 
_ = trainer.model(torch.LongTensor([[1, 0, 1]]).to(device)) assert len(trainer.model.get_input_embeddings()._forward_hooks) == 0 @require_peft def test_peft_sft_trainer_tag(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, packing=True, ) peft_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=peft_config, ) assert trainer.model.model_tags == trainer._tag_names @require_peft def test_sft_trainer_tag(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, packing=True, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) assert trainer.model.model_tags == trainer._tag_names def test_sft_trainer_eval_packing(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, packing=True, max_seq_length=32, # make sure there is at least 1 packed sequence eval_packing=False, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.dummy_chatml_dataset, eval_dataset=self.dummy_chatml_dataset, ) assert len(trainer.train_dataset["input_ids"]) == 1 assert len(trainer.eval_dataset["input_ids"]) != 1 training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, max_seq_length=32, # make sure there is at least 1 packed sequence packing=True, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.dummy_chatml_dataset, eval_dataset=self.dummy_chatml_dataset, ) assert len(trainer.train_dataset["input_ids"]) == 1 assert len(trainer.eval_dataset["input_ids"]) == 1 training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, max_seq_length=32, # make sure there is at least 1 packed sequence packing=False, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.dummy_chatml_dataset, eval_dataset=self.dummy_chatml_dataset, ) assert len(trainer.train_dataset["input_ids"]) != 1 assert len(trainer.eval_dataset["input_ids"]) != 1 @requires_pil def test_sft_trainer_skip_prepare_dataset(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, gradient_checkpointing=True, remove_unused_columns=False, dataset_text_field="text", # need a dummy field dataset_kwargs={"skip_prepare_dataset": True}, ) trainer = SFTTrainer( model=self.model_id, args=training_args, train_dataset=self.dummy_vsft_instruction_dataset, 
eval_dataset=self.dummy_vsft_instruction_dataset, ) assert trainer.train_dataset.features == self.dummy_vsft_instruction_dataset.features assert trainer.eval_dataset.features == self.dummy_vsft_instruction_dataset.features @requires_pil def test_sft_trainer_llava(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = SFTConfig( output_dir=tmp_dir, dataloader_drop_last=True, evaluation_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, per_device_eval_batch_size=2, remove_unused_columns=False, dataset_text_field="text", # need a dummy field dataset_kwargs={"skip_prepare_dataset": True}, ) tiny_llava = LlavaForConditionalGeneration.from_pretrained( "trl-internal-testing/tiny-random-LlavaForConditionalGeneration" ) processor = AutoProcessor.from_pretrained("trl-internal-testing/tiny-random-LlavaForConditionalGeneration") processor.tokenizer.chat_template = """{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. {% for message in messages %}{% if message['role'] == 'user' %}USER: {% else %}ASSISTANT: {% endif %}{% for item in message['content'] %}{% if item['type'] == 'text' %}{{ item['text'] }}{% elif item['type'] == 'image' %}<image>{% endif %}{% endfor %}{% if message['role'] == 'user' %} {% else %}{{eos_token}}{% endif %}{% endfor %}{% if add_generation_prompt %}ASSISTANT: {% endif %}""" class LLavaDataCollator: def __init__(self, processor): self.processor = processor def __call__(self, examples): texts = [] images = [] for example in examples: if len(example["images"]) > 1: raise ValueError("This collator only supports one image per example") messages = example["messages"] text = self.processor.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=False ) texts.append(text) images.append(example["images"][0]) batch = self.processor(texts, images, return_tensors="pt", padding=True) labels = batch["input_ids"].clone() if self.processor.tokenizer.pad_token_id is not None: labels[labels == self.processor.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch data_collator = LLavaDataCollator(processor) trainer = SFTTrainer( model=tiny_llava, args=training_args, train_dataset=self.dummy_vsft_instruction_dataset, eval_dataset=self.dummy_vsft_instruction_dataset, data_collator=data_collator, ) trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None assert trainer.state.log_history[0]["eval_loss"] is not None assert "model.safetensors" in os.listdir(tmp_dir + "/checkpoint-2")
trl/tests/test_sft_trainer.py/0
{ "file_path": "trl/tests/test_sft_trainer.py", "repo_id": "trl", "token_count": 25235 }
656
# Copyright 2023 DDPO-pytorch authors (Kevin Black), The HuggingFace Team, metric-space. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import os import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from diffusers import DDIMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg from ..core import randn_tensor from ..import_utils import is_peft_available from .sd_utils import convert_state_dict_to_diffusers if is_peft_available(): from peft import LoraConfig from peft.utils import get_peft_model_state_dict @dataclass class DDPOPipelineOutput: """ Output class for the diffusers pipeline to be finetuned with the DDPO trainer Args: images (`torch.Tensor`): The generated images. latents (`List[torch.Tensor]`): The latents used to generate the images. log_probs (`List[torch.Tensor]`): The log probabilities of the latents. """ images: torch.Tensor latents: torch.Tensor log_probs: torch.Tensor @dataclass class DDPOSchedulerOutput: """ Output class for the diffusers scheduler to be finetuned with the DDPO trainer Args: latents (`torch.Tensor`): Predicted sample at the previous timestep. Shape: `(batch_size, num_channels, height, width)` log_probs (`torch.Tensor`): Log probability of the above mentioned sample. Shape: `(batch_size)` """ latents: torch.Tensor log_probs: torch.Tensor class DDPOStableDiffusionPipeline: """ Main class for the diffusers pipeline to be finetuned with the DDPO trainer """ def __call__(self, *args, **kwargs) -> DDPOPipelineOutput: raise NotImplementedError def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput: raise NotImplementedError @property def unet(self): """ Returns the 2d U-Net model used for diffusion. 
""" raise NotImplementedError @property def vae(self): """ Returns the Variational Autoencoder model used from mapping images to and from the latent space """ raise NotImplementedError @property def tokenizer(self): """ Returns the tokenizer used for tokenizing text inputs """ raise NotImplementedError @property def scheduler(self): """ Returns the scheduler associated with the pipeline used for the diffusion process """ raise NotImplementedError @property def text_encoder(self): """ Returns the text encoder used for encoding text inputs """ raise NotImplementedError @property def autocast(self): """ Returns the autocast context manager """ raise NotImplementedError def set_progress_bar_config(self, *args, **kwargs): """ Sets the progress bar config for the pipeline """ raise NotImplementedError def save_pretrained(self, *args, **kwargs): """ Saves all of the model weights """ raise NotImplementedError def get_trainable_layers(self, *args, **kwargs): """ Returns the trainable parameters of the pipeline """ raise NotImplementedError def save_checkpoint(self, *args, **kwargs): """ Light wrapper around accelerate's register_save_state_pre_hook which is run before saving state """ raise NotImplementedError def load_checkpoint(self, *args, **kwargs): """ Light wrapper around accelerate's register_lad_state_pre_hook which is run before loading state """ raise NotImplementedError def _left_broadcast(input_tensor, shape): """ As opposed to the default direction of broadcasting (right to left), this function broadcasts from left to right Args: input_tensor (`torch.FloatTensor`): is the tensor to broadcast shape (`Tuple[int]`): is the shape to broadcast to """ input_ndim = input_tensor.ndim if input_ndim > len(shape): raise ValueError( "The number of dimensions of the tensor to broadcast cannot be greater than the length of the shape to broadcast to" ) return input_tensor.reshape(input_tensor.shape + (1,) * (len(shape) - input_ndim)).broadcast_to(shape) def _get_variance(self, timestep, prev_timestep): alpha_prod_t = torch.gather(self.alphas_cumprod, 0, timestep.cpu()).to(timestep.device) alpha_prod_t_prev = torch.where( prev_timestep.cpu() >= 0, self.alphas_cumprod.gather(0, prev_timestep.cpu()), self.final_alpha_cumprod, ).to(timestep.device) beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) return variance def scheduler_step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = False, generator=None, prev_sample: Optional[torch.FloatTensor] = None, ) -> DDPOSchedulerOutput: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. eta (`float`): weight of noise for added noise in diffusion step. use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. 
If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input and `use_clipped_model_output` will have not effect. generator: random number generator. variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we can directly provide the noise for the variance itself. This is useful for methods such as CycleDiffusion. (https://arxiv.org/abs/2210.05559) Returns: `DDPOSchedulerOutput`: the predicted sample at the previous timestep and the log probability of the sample """ if self.num_inference_steps is None: raise ValueError( "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps # to prevent OOB on gather prev_timestep = torch.clamp(prev_timestep, 0, self.config.num_train_timesteps - 1) # 2. compute alphas, betas alpha_prod_t = self.alphas_cumprod.gather(0, timestep.cpu()) alpha_prod_t_prev = torch.where( prev_timestep.cpu() >= 0, self.alphas_cumprod.gather(0, prev_timestep.cpu()), self.final_alpha_cumprod, ) alpha_prod_t = _left_broadcast(alpha_prod_t, sample.shape).to(sample.device) alpha_prod_t_prev = _left_broadcast(alpha_prod_t_prev, sample.shape).to(sample.device) beta_prod_t = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) pred_epsilon = model_output elif self.config.prediction_type == "sample": pred_original_sample = model_output pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) elif self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) variance = _get_variance(self, timestep, prev_timestep) std_dev_t = eta * variance ** (0.5) std_dev_t = _left_broadcast(std_dev_t, sample.shape).to(sample.device) if use_clipped_model_output: # the pred_epsilon is always re-derived from the clipped x_0 in Glide pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5) # 6. 
compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf prev_sample_mean = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction if prev_sample is not None and generator is not None: raise ValueError( "Cannot pass both generator and prev_sample. Please make sure that either `generator` or" " `prev_sample` stays `None`." ) if prev_sample is None: variance_noise = randn_tensor( model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype, ) prev_sample = prev_sample_mean + std_dev_t * variance_noise # log prob of prev_sample given prev_sample_mean and std_dev_t log_prob = ( -((prev_sample.detach() - prev_sample_mean) ** 2) / (2 * (std_dev_t**2)) - torch.log(std_dev_t) - torch.log(torch.sqrt(2 * torch.as_tensor(np.pi))) ) # mean along all but batch dimension log_prob = log_prob.mean(dim=tuple(range(1, log_prob.ndim))) return DDPOSchedulerOutput(prev_sample.type(sample.dtype), log_prob) # 1. The output type for call is different as the logprobs are now returned # 2. An extra method called `scheduler_step` is added which is used to constraint the scheduler output @torch.no_grad() def pipeline_step( self, prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guidance_rescale: float = 0.0, ): r""" Function invoked when calling the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. instead. height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The height in pixels of the generated image. width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. guidance_scale (`float`, *optional*, defaults to 7.5): Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). `guidance_scale` is defined as `w` of equation 2. of [Imagen Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. 
Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. eta (`float`, *optional*, defaults to 0.0): Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to [`schedulers.DDIMScheduler`], will be ignored for others. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor will ge generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. callback (`Callable`, *optional*): A function that will be called every `callback_steps` steps during inference. The function will be called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`. callback_steps (`int`, *optional*, defaults to 1): The frequency at which the `callback` function will be called. If not specified, the callback will be called at every step. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py). guidance_rescale (`float`, *optional*, defaults to 0.7): Guidance rescale factor proposed by [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf) `guidance_scale` is defined as `φ` in equation 16. of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when using zero terminal SNR. Examples: Returns: `DDPOPipelineOutput`: The generated image, the predicted latents used to generate the image and the associated log probabilities """ # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs( prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds, ) # 2. 
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # 3. Encode input prompt text_encoder_lora_scale = cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None prompt_embeds = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, lora_scale=text_encoder_lora_scale, ) # 4. Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # 5. Prepare latent variables num_channels_latents = self.unet.config.in_channels latents = self.prepare_latents( batch_size * num_images_per_prompt, num_channels_latents, height, width, prompt_embeds.dtype, device, generator, latents, ) # 6. Denoising loop num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order all_latents = [latents] all_log_probs = [] with self.progress_bar(total=num_inference_steps) as progress_bar: for i, t in enumerate(timesteps): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) # predict the noise residual noise_pred = self.unet( latent_model_input, t, encoder_hidden_states=prompt_embeds, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] # perform guidance if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) if do_classifier_free_guidance and guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale) # compute the previous noisy sample x_t -> x_t-1 scheduler_output = scheduler_step(self.scheduler, noise_pred, t, latents, eta) latents = scheduler_output.latents log_prob = scheduler_output.log_probs all_latents.append(latents) all_log_probs.append(log_prob) # call the callback, if provided if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() if callback is not None and i % callback_steps == 0: callback(i, t, latents) if not output_type == "latent": image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0] image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype) else: image = latents has_nsfw_concept = None if has_nsfw_concept is None: do_denormalize = [True] * image.shape[0] else: do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize) # Offload last model to CPU if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None: self.final_offload_hook.offload() return DDPOPipelineOutput(image, all_latents, all_log_probs) class DefaultDDPOStableDiffusionPipeline(DDPOStableDiffusionPipeline): def __init__(self, pretrained_model_name: str, *, pretrained_model_revision: str = "main", use_lora: bool = True): self.sd_pipeline = StableDiffusionPipeline.from_pretrained( pretrained_model_name, revision=pretrained_model_revision ) self.use_lora = use_lora self.pretrained_model = pretrained_model_name self.pretrained_revision = pretrained_model_revision try: self.sd_pipeline.load_lora_weights( pretrained_model_name, weight_name="pytorch_lora_weights.safetensors", revision=pretrained_model_revision, ) self.use_lora = True except OSError: if use_lora: warnings.warn( "If you are aware that the pretrained model has no lora weights to it, ignore this message. " "Otherwise please check the if `pytorch_lora_weights.safetensors` exists in the model folder." 
) self.sd_pipeline.scheduler = DDIMScheduler.from_config(self.sd_pipeline.scheduler.config) self.sd_pipeline.safety_checker = None # memory optimization self.sd_pipeline.vae.requires_grad_(False) self.sd_pipeline.text_encoder.requires_grad_(False) self.sd_pipeline.unet.requires_grad_(not self.use_lora) def __call__(self, *args, **kwargs) -> DDPOPipelineOutput: return pipeline_step(self.sd_pipeline, *args, **kwargs) def scheduler_step(self, *args, **kwargs) -> DDPOSchedulerOutput: return scheduler_step(self.sd_pipeline.scheduler, *args, **kwargs) @property def unet(self): return self.sd_pipeline.unet @property def vae(self): return self.sd_pipeline.vae @property def tokenizer(self): return self.sd_pipeline.tokenizer @property def scheduler(self): return self.sd_pipeline.scheduler @property def text_encoder(self): return self.sd_pipeline.text_encoder @property def autocast(self): return contextlib.nullcontext if self.use_lora else None def save_pretrained(self, output_dir): if self.use_lora: state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(self.sd_pipeline.unet)) self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict) self.sd_pipeline.save_pretrained(output_dir) def set_progress_bar_config(self, *args, **kwargs): self.sd_pipeline.set_progress_bar_config(*args, **kwargs) def get_trainable_layers(self): if self.use_lora: lora_config = LoraConfig( r=4, lora_alpha=4, init_lora_weights="gaussian", target_modules=["to_k", "to_q", "to_v", "to_out.0"], ) self.sd_pipeline.unet.add_adapter(lora_config) # To avoid accelerate unscaling problems in FP16. for param in self.sd_pipeline.unet.parameters(): # only upcast trainable parameters (LoRA) into fp32 if param.requires_grad: param.data = param.to(torch.float32) return self.sd_pipeline.unet else: return self.sd_pipeline.unet def save_checkpoint(self, models, weights, output_dir): if len(models) != 1: raise ValueError("Given how the trainable params were set, this should be of length 1") if self.use_lora and hasattr(models[0], "peft_config") and getattr(models[0], "peft_config", None) is not None: state_dict = convert_state_dict_to_diffusers(get_peft_model_state_dict(models[0])) self.sd_pipeline.save_lora_weights(save_directory=output_dir, unet_lora_layers=state_dict) elif not self.use_lora and isinstance(models[0], UNet2DConditionModel): models[0].save_pretrained(os.path.join(output_dir, "unet")) else: raise ValueError(f"Unknown model type {type(models[0])}") def load_checkpoint(self, models, input_dir): if len(models) != 1: raise ValueError("Given how the trainable params were set, this should be of length 1") if self.use_lora: lora_state_dict, network_alphas = self.sd_pipeline.lora_state_dict( input_dir, weight_name="pytorch_lora_weights.safetensors" ) self.sd_pipeline.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=models[0]) elif not self.use_lora and isinstance(models[0], UNet2DConditionModel): load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") models[0].register_to_config(**load_model.config) models[0].load_state_dict(load_model.state_dict()) del load_model else: raise ValueError(f"Unknown model type {type(models[0])}")
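if __name__ == "__main__":
    # Minimal usage sketch of the wrapper defined above. The checkpoint name and
    # the sampling settings are placeholders; in practice the DDPO trainer builds
    # and drives this object for you.
    pipeline = DefaultDDPOStableDiffusionPipeline(
        "runwayml/stable-diffusion-v1-5",  # example checkpoint, not prescribed by this module
        use_lora=True,
    )
    pipeline.set_progress_bar_config(disable=True)

    # `__call__` forwards to `pipeline_step` and returns the generated images
    # together with the per-step latents and log probabilities used by the trainer.
    output = pipeline(
        prompt=["a photo of a cat"],
        num_inference_steps=50,
        guidance_scale=5.0,
        eta=1.0,
        output_type="pt",
    )
    images, latents, log_probs = output.images, output.latents, output.log_probs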
trl/trl/models/modeling_sd_base.py/0
{ "file_path": "trl/trl/models/modeling_sd_base.py", "repo_id": "trl", "token_count": 11407 }
657
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional

from transformers import TrainingArguments


@dataclass
class ORPOConfig(TrainingArguments):
    r"""
    ORPOConfig collects all training arguments related to the [`ORPOTrainer`] class.

    Using [`HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`Optional[int]`, defaults to `None`):
            The maximum length of the sequences in the batch. This argument is required if you want to use the
            default data collator.
        max_prompt_length (`Optional[int]`, defaults to `None`):
            The maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`Optional[int]`, defaults to `None`):
            The maximum length of the completions. This argument is required if you want to use the default data
            collator and your model is an encoder-decoder.
        beta (`float`, defaults to `0.1`):
            The beta factor in the ORPO loss (lambda/alpha in the paper/code), i.e. the weight of the relative loss
            ratio in the SFT loss.
        label_pad_token_id (`int`, defaults to `-100`):
            The label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`Optional[int]`, defaults to `None`):
            The padding value if it is different from the tokenizer's pad_token_id.
        truncation_mode (`str`, defaults to `keep_end`):
            The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to
            use the default data collator.
        generate_during_eval (`bool`, defaults to `False`):
            Whether to sample and log generations during the evaluation step.
        is_encoder_decoder (`Optional[bool]`, *optional*, defaults to `None`):
            If no model is provided, we need to know if the model_init returns an encoder-decoder.
        disable_dropout (`bool`, defaults to `True`):
            Whether or not to disable dropout in `model`.
        model_init_kwargs (`Optional[Dict]`, *optional*):
            Dict of optional kwargs to pass when instantiating the model from a string.
        dataset_num_proc (`Optional[int]`, *optional*):
            The number of workers to use to tokenize the data. Defaults to `None`.
    """

    max_length: Optional[int] = None
    max_prompt_length: Optional[int] = None
    max_completion_length: Optional[int] = None
    beta: float = 0.1
    disable_dropout: bool = True

    label_pad_token_id: int = -100
    padding_value: Optional[int] = None
    truncation_mode: str = "keep_end"
    generate_during_eval: bool = False
    is_encoder_decoder: Optional[bool] = None

    model_init_kwargs: Optional[Dict] = None

    dataset_num_proc: Optional[int] = None
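if __name__ == "__main__":
    # Minimal usage sketch with hypothetical, untuned values: a config like this
    # is what you would normally pass as `args` to `ORPOTrainer`, together with a
    # model, tokenizer and preference dataset.
    example_args = ORPOConfig(
        output_dir="orpo-example-output",  # placeholder path
        beta=0.1,
        max_length=1024,
        max_prompt_length=512,
        per_device_train_batch_size=2,
    )
    print(example_args.beta, example_args.max_length)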
trl/trl/trainer/orpo_config.py/0
{ "file_path": "trl/trl/trainer/orpo_config.py", "repo_id": "trl", "token_count": 1172 }
658
# Big model inference benchmarks

Running inference with Accelerate on big models.

## Setup

These benchmarks use the `transformers` library:

```bash
pip install transformers
```

To reproduce or test a new setup, run

```py
python inference_acc.py model_name
```

This script supports `gpt-j-6b`, `gpt-neox`, `opt` (30B version) and `T0pp` out of the box, but you can specify any valid checkpoint for `model_name`.

To force a different `torch_dtype` than the one in the config, pass `--torch_dtype xxx`.

If you get an error linked to disk offload, you need to add the option `--disk-offload`.

## Results

On a setup with two Titan RTXs (24GB of RAM each) and 32GB of CPU RAM, we get the following benchmarks (T0pp does not run in float16, which is why it's not included).

| Model | Model load time | Generation time | dtype | GPU 0 use | GPU 1 use | CPU use | Disk offload |
|:-----:|:---------------:|:---------------:|:-----:|:---------:|:---------:|:-------:|:------------:|
| GPT-J-6B | 8.7s | 0.05s per token | float16 | 11.7GB | 0GB | 0GB | no |
| GPT-J-6B | 12.4s | 0.06s per token | float32 | 21.9GB | 1.5GB | 0GB | no |
| GPT-Neo-X-20B | 30.9s | 0.08s per token | float16 | 21.5GB | 18GB | 0GB | no |
| GPT-Neo-X-20B | 78.2s | 10.72s per token | float32 | 20.3GB | 22.7GB | 24.4GB | yes |
| T0pp (11B) | 29.4s | 0.05s per token | float32 | 21.1GB | 21.3GB | 0GB | no |
| OPT-30B | 34.5s | 2.37s per token | float16 | 20.7GB | 22.3GB | 14.1GB | no |
| OPT-30B | 112.3s | 33.9s per token | float32 | 20.2GB | 21.2GB | 23.5GB | yes |

Notes on the results:
- using two GPUs instead of one does not slow down generation
- using CPU offload slows things down a bit (see OPT-30B)
- using disk offload slows things down a lot (prefetching is not implemented yet)

You will also note that Accelerate does not use any more GPU or CPU RAM than necessary:
- peak GPU memory is exactly the size of the part of the model put on a given GPU
- peak CPU memory is either the size of the biggest checkpoint shard or the part of the model offloaded to CPU, whichever is bigger.
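If you just want to try the underlying sharded-loading path outside the benchmark script, the sketch below shows one way to do it. The checkpoint name, dtype and prompt are only examples, and you need enough combined GPU/CPU (and possibly disk) memory for the model you pick.

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "EleutherAI/gpt-j-6b"  # example: any of the checkpoints benchmarked above

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# device_map="auto" lets Accelerate dispatch the weights across GPUs, CPU RAM
# and, if needed, disk offload instead of loading everything onto one device.
model = AutoModelForCausalLM.from_pretrained(
    checkpoint, device_map="auto", torch_dtype=torch.float16
)

inputs = tokenizer("Hello, my name is", return_tensors="pt").to(0)  # inputs go on the first GPU
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```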
accelerate/benchmarks/README.md/0
{ "file_path": "accelerate/benchmarks/README.md", "repo_id": "accelerate", "token_count": 702 }
0
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# TPU training

A [TPU (Tensor Processing Unit)](https://cloud.google.com/tpu/docs/intro-to-tpu) is a type of hardware specifically designed for training models efficiently. Accelerate supports TPU training, but there are a few things you should be aware of, namely graph compilation. This tutorial briefly discusses compilation, and for more details, take a look at the [Training on TPUs with Accelerate](../concept_guides/training_tpu) guide.

## Compilation

A TPU creates a graph of all the operations in the training step, such as the forward pass, backward pass and optimizer step. This is why the first training step always takes a while: building and compiling this graph takes time. But once compilation is complete, it is cached and all subsequent steps are much faster.

The key is to avoid triggering compilation again, or else training becomes extremely slow. This means all your operations must be exactly the same from step to step:

* all tensors in your batches must have the same length (for example, no dynamic padding for NLP tasks)
* your code must be static (for example, no layers with for loops that have different lengths depending on the input, such as an LSTM)

A minimal padding example illustrating the first point is shown at the end of this page.

## Weight tying

A common language model design is to tie the weights of the embedding and softmax layers. However, moving the model to a TPU (either yourself or passing it to the [`~Accelerator.prepare`] method) breaks the weight tying and you'll need to retie the weights.

To add special behavior (like weight tying) in your script for TPUs, check whether [`~Accelerator.distributed_type`] is `DistributedType.TPU` first. Then you can use the [`~transformers.PreTrainedModel.tie_weights`] method to tie the weights.

```py
if accelerator.distributed_type == DistributedType.TPU:
    model.tie_weights()
```
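To make the fixed-length requirement from the compilation section concrete, here is a minimal sketch of a collate function that pads every batch to the same length; the tokenizer checkpoint and the length of 128 are only examples:

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # example checkpoint

def collate_fn(examples):
    # Pad every batch to one fixed length so the graph compiled on the first
    # step can be reused for all subsequent steps.
    return tokenizer.pad(
        examples,
        padding="max_length",
        max_length=128,
        return_tensors="pt",
    )
```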
accelerate/docs/source/basic_tutorials/tpu.md/0
{ "file_path": "accelerate/docs/source/basic_tutorials/tpu.md", "repo_id": "accelerate", "token_count": 629 }
1
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Learning how to incorporate 🤗 Accelerate features quickly!

Please use the interactive tool below to help you get started with learning about a particular feature of 🤗 Accelerate and how to utilize it! It will provide you with a code diff, an explanation of what is going on, as well as some useful links to explore more within the documentation!

Most code examples start from the following Python code before integrating 🤗 Accelerate in some way:

```python
for batch in dataloader:
    optimizer.zero_grad()
    inputs, targets = batch
    inputs = inputs.to(device)
    targets = targets.to(device)
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    loss.backward()
    optimizer.step()
    scheduler.step()
```

A typical Accelerate-integrated version of this loop is sketched at the end of this page.

<div class="block dark:hidden">
	<iframe
        src="https://hf-accelerate-accelerate-examples.hf.space?__theme=light"
        width="850"
        height="1600"
    ></iframe>
</div>
<div class="hidden dark:block">
    <iframe
        src="https://hf-accelerate-accelerate-examples.hf.space?__theme=dark"
        width="850"
        height="1600"
    ></iframe>
</div>
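For reference, after integrating 🤗 Accelerate the loop above typically ends up looking roughly like this (the model, optimizer, dataloader, scheduler and loss function are assumed to be defined as before):

```python
from accelerate import Accelerator

accelerator = Accelerator()
model, optimizer, dataloader, scheduler = accelerator.prepare(
    model, optimizer, dataloader, scheduler
)

for batch in dataloader:
    optimizer.zero_grad()
    inputs, targets = batch
    # No manual `.to(device)` calls are needed: `prepare` already placed
    # the model and data on the right device(s).
    outputs = model(inputs)
    loss = loss_function(outputs, targets)
    accelerator.backward(loss)
    optimizer.step()
    scheduler.step()
```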
accelerate/docs/source/usage_guides/explore.md/0
{ "file_path": "accelerate/docs/source/usage_guides/explore.md", "repo_id": "accelerate", "token_count": 581 }
2
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the checkpointing capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## MAX_GPU_BATCH_SIZE = 16 EVAL_BATCH_SIZE = 32 def get_dataloaders(accelerator: Accelerator, batch_size: int = 16): """ Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. """ tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. 
max_length = 128 if accelerator.distributed_type == DistributedType.XLA else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": pad_to_multiple_of = 16 elif accelerator.mixed_precision != "no": pad_to_multiple_of = 8 else: pad_to_multiple_of = None return tokenizer.pad( examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ) # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders get_dataloaders = mocked_dataloaders # noqa: F811 def training_function(config, args): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": config["num_epochs"] = 2 # Initialize accelerator accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lr = config["lr"] num_epochs = int(config["num_epochs"]) seed = int(config["seed"]) batch_size = int(config["batch_size"]) # New Code # # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps, "isdigit"): if args.checkpointing_steps == "epoch": checkpointing_steps = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: raise ValueError( f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." ) else: checkpointing_steps = None set_seed(seed) train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size) metric = evaluate.load("glue", "mrpc") # If the batch size is too big we use gradient accumulation gradient_accumulation_steps = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.XLA: gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE batch_size = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). model = model.to(accelerator.device) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=lr) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # New Code # # We need to keep track of how many total steps we have iterated over overall_step = 0 # We also need to keep track of the stating epoch so files are named properly starting_epoch = 0 # We need to load the checkpoint back in before training here with `load_state` # The total number of epochs is adjusted based on where the state is being loaded from, # as we assume continuation of the same training script if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # Now we train the model for epoch in range(starting_epoch, num_epochs): model.train() # New Code # if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) outputs = model(**batch) loss = outputs.loss loss = loss / gradient_accumulation_steps accelerator.backward(loss) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() # New Code # overall_step += 1 # New Code # # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state` # These are saved to folders named `step_{overall_step}` # Will contain files: "pytorch_model.bin", "optimizer.bin", "scheduler.bin", and "random_states.pkl" # If mixed precision was used, will also save a "scalar.bin" file if isinstance(checkpointing_steps, int): output_dir = f"step_{overall_step}" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) model.eval() for step, batch in enumerate(eval_dataloader): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device) with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(f"epoch {epoch}:", eval_metric) # New Code # # We save the model, optimizer, lr_scheduler, and seed states by calling `save_state` # These are saved to folders named `epoch_{epoch}` # Will contain files: "pytorch_model.bin", "optimizer.bin", "scheduler.bin", and "random_states.pkl" # If mixed precision was used, will also save a "scalar.bin" file if checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) def main(): parser = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU.", ) parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) args = parser.parse_args() config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(config, args) if __name__ == "__main__": main()
accelerate/examples/by_feature/checkpointing.py/0
{ "file_path": "accelerate/examples/by_feature/checkpointing.py", "repo_id": "accelerate", "token_count": 5198 }
3
#!/bin/bash
#SBATCH --job-name=multigpu
#SBATCH -D .
#SBATCH --output=O-%x.%j
#SBATCH --error=E-%x.%j
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1       # number of MP tasks
#SBATCH --gres=gpu:4              # number of GPUs per node
#SBATCH --cpus-per-task=160       # number of cores per tasks
#SBATCH --time=01:59:00           # maximum execution time (HH:MM:SS)

######################
### Set environment ##
######################
source activateEnviroment.sh
export GPUS_PER_NODE=4
######################

export SCRIPT=/accelerate/examples/complete_nlp_example.py
export SCRIPT_ARGS=" \
    --mixed_precision fp16 \
    --output_dir /accelerate/examples/output \
    --with_tracking \
    "

accelerate launch --num_processes $GPUS_PER_NODE $SCRIPT $SCRIPT_ARGS
accelerate/examples/slurm/submit_multigpu.sh/0
{ "file_path": "accelerate/examples/slurm/submit_multigpu.sh", "repo_id": "accelerate", "token_count": 326 }
4
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import find_packages, setup extras = {} extras["quality"] = [ "black ~= 23.1", # hf-doc-builder has a hidden dependency on `black` "hf-doc-builder >= 0.3.0", "ruff ~= 0.2.1", ] extras["docs"] = [] extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"] extras["test_dev"] = [ "datasets", "diffusers", "evaluate", "torchpippy>=0.2.0", "transformers", "scipy", "scikit-learn", "tqdm", "bitsandbytes", "timm", ] extras["testing"] = extras["test_prod"] + extras["test_dev"] extras["deepspeed"] = ["deepspeed<=0.14.0"] extras["rich"] = ["rich"] extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"] extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"] extras["sagemaker"] = [ "sagemaker", # boto3 is a required package in sagemaker ] setup( name="accelerate", version="0.31.0.dev0", description="Accelerate", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning", license="Apache", author="The HuggingFace team", author_email="zach.mueller@huggingface.co", url="https://github.com/huggingface/accelerate", package_dir={"": "src"}, packages=find_packages("src"), entry_points={ "console_scripts": [ "accelerate=accelerate.commands.accelerate_cli:main", "accelerate-config=accelerate.commands.config:main", "accelerate-estimate-memory=accelerate.commands.estimate:main", "accelerate-launch=accelerate.commands.launch:main", ] }, python_requires=">=3.8.0", install_requires=[ "numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub", "safetensors>=0.3.1", ], extras_require=extras, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], ) # Release checklist # 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one): # git checkout -b vXX.xx-release # The -b is only necessary for creation (so remove it when doing a patch) # 2. Change the version in __init__.py and setup.py to the proper value. # 3. Commit these changes with the message: "Release: v<VERSION>" # 4. Add a tag in git to mark the release: # git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' # Push the tag and release commit to git: git push --tags origin vXX.xx-release # 5. Run the following commands in the top-level directory: # rm -rf dist # rm -rf build # python setup.py bdist_wheel # python setup.py sdist # 6. Upload the package to the pypi test server first: # twine upload dist/* -r testpypi # 7. 
Check that you can install it in a virtualenv by running: # pip install accelerate # pip uninstall accelerate # pip install -i https://testpypi.python.org/pypi accelerate # accelerate env # accelerate test # 8. Upload the final version to actual pypi: # twine upload dist/* -r pypi # 9. Add release notes to the tag in github once everything is looking hunky-dory. # 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to # main.
accelerate/setup.py/0
{ "file_path": "accelerate/setup.py", "repo_id": "accelerate", "token_count": 1663 }
5
#!/usr/bin/env python # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import model_info from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError from accelerate import init_empty_weights from accelerate.commands.utils import CustomArgumentParser from accelerate.utils import ( calculate_maximum_sizes, convert_bytes, is_timm_available, is_transformers_available, ) if is_transformers_available(): import transformers from transformers import AutoConfig, AutoModel if is_timm_available(): import timm def verify_on_hub(repo: str, token: str = None): "Verifies that the model is on the hub and returns the model info." try: return model_info(repo, token=token) except GatedRepoError: return "gated" except RepositoryNotFoundError: return "repo" def check_has_model(error): """ Checks what library spawned `error` when a model is not found """ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]: return "timm" elif ( is_transformers_available() and isinstance(error, OSError) and "does not appear to have a file named" in error.args[0] ): return "transformers" else: return "unknown" def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None): """ Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption. Args: model_name (`str`): The model name on the Hub library_name (`str`): The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no metadata on the Hub to determine the library. trust_remote_code (`bool`, `optional`, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. access_token (`str`, `optional`, defaults to `None`): The access token to use to access private or gated models on the Hub. (for use on the Gradio app) Returns: `torch.nn.Module`: The torch model that has been initialized on the `meta` device. """ model_info = verify_on_hub(model_name, access_token) # Simplified errors if model_info == "gated": raise GatedRepoError( f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`." ) elif model_info == "repo": raise RepositoryNotFoundError( f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo," " make sure you are authenticated via `huggingface-cli login` and have access." 
) if library_name is None: library_name = getattr(model_info, "library_name", False) if not library_name: raise ValueError( f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)" ) if library_name == "transformers": if not is_transformers_available(): raise ImportError( f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`" ) print(f"Loading pretrained config for `{model_name}` from `transformers`...") if model_info.config is None: raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.") auto_map = model_info.config.get("auto_map", False) config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token) with init_empty_weights(): # remote code could specify a specific `AutoModel` class in the `auto_map` constructor = AutoModel if isinstance(auto_map, dict): value = None for key in auto_map.keys(): if key.startswith("AutoModelFor"): value = key break if value is not None: constructor = getattr(transformers, value) model = constructor.from_config(config, trust_remote_code=trust_remote_code) elif library_name == "timm": if not is_timm_available(): raise ImportError( f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`" ) print(f"Loading pretrained config for `{model_name}` from `timm`...") with init_empty_weights(): model = timm.create_model(model_name, pretrained=False) else: raise ValueError( f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support." ) return model def create_ascii_table(headers: list, rows: list, title: str): "Creates a pretty table from a list of rows, minimal version of `tabulate`." 
sep_char, in_between = "│", "─" column_widths = [] for i in range(len(headers)): column_values = [row[i] for row in rows] + [headers[i]] max_column_width = max(len(value) for value in column_values) column_widths.append(max_column_width) formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))] pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}" diff = 0 def make_row(left_char, middle_char, right_char): return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}" separator = make_row("├", "┼", "┤") if len(title) > sum(column_widths): diff = abs(len(title) - len(separator)) column_widths[-1] += diff # Update with diff separator = make_row("├", "┼", "┤") initial_rows = [ make_row("┌", in_between, "┐"), f"{sep_char}{title.center(len(separator) - 2)}{sep_char}", make_row("├", "┬", "┤"), ] table = "\n".join(initial_rows) + "\n" column_widths[-1] += diff centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)] table += f"{pattern % tuple(centered_line)}\n{separator}\n" for i, line in enumerate(rows): centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)] table += f"{pattern % tuple(centered_line)}\n" table += f'└{"┴".join([in_between * n for n in column_widths])}┘' return table def estimate_command_parser(subparsers=None): if subparsers is not None: parser = subparsers.add_parser("estimate-memory") else: parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.") parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.") parser.add_argument( "--library_name", type=str, help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.", choices=["timm", "transformers"], ) parser.add_argument( "--dtypes", type=str, nargs="+", default=["float32", "float16", "int8", "int4"], help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`", choices=["float32", "float16", "int8", "int4"], ) parser.add_argument( "--trust_remote_code", action="store_true", help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag should only be used for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine.""", default=False, ) if subparsers is not None: parser.set_defaults(func=estimate_command) return parser def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict: """ Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of 1. Args: bytes (`int`): The size of the model being trained. mixed_precision (`str`): The mixed precision that would be ran. msamp_config (`str`): The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`. 
""" memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1} fp32_size = bytes fp16_size = bytes // 2 if mixed_precision == "float32": memory_sizes["model"] = fp32_size memory_sizes["gradients"] = fp32_size memory_sizes["optimizer"] = fp32_size * 2 memory_sizes["step"] = fp32_size * 4 elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None): # With native `TransformersEngine`, there is no memory savings with FP8 # With mixed precision training, the model has weights stored # in FP16 and FP32 memory_sizes["model"] = fp32_size # 1.5 from weight gradient + computation (GEMM) memory_sizes["gradients"] = fp32_size + fp16_size # 2x from optimizer states memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states memory_sizes["step"] = memory_sizes["optimizer"] return memory_sizes def gather_data(args): "Creates an empty model and gathers the data for the sizes" try: model = create_empty_model( args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code ) except (RuntimeError, OSError) as e: library = check_has_model(e) if library != "unknown": raise RuntimeError( f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo." ) raise e total_size, largest_layer = calculate_maximum_sizes(model) data = [] for dtype in args.dtypes: dtype_total_size = total_size dtype_largest_layer = largest_layer[0] dtype_training_size = estimate_training_usage(dtype_total_size, dtype) if dtype == "float16": dtype_total_size /= 2 dtype_largest_layer /= 2 elif dtype == "int8": dtype_total_size /= 4 dtype_largest_layer /= 4 elif dtype == "int4": dtype_total_size /= 8 dtype_largest_layer /= 8 data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size]) return data def estimate_command(args): data = gather_data(args) for row in data: for i, item in enumerate(row): if isinstance(item, (int, float)): row[i] = convert_bytes(item) elif isinstance(item, dict): training_usage = max(item.values()) row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A" headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"] title = f"Memory Usage for loading `{args.model_name}`" table = create_ascii_table(headers, data, title) print(table) def main(): parser = estimate_command_parser() args = parser.parse_args() estimate_command(args) if __name__ == "__main__": main()
accelerate/src/accelerate/commands/estimate.py/0
{ "file_path": "accelerate/src/accelerate/commands/estimate.py", "repo_id": "accelerate", "token_count": 4972 }
6
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import logging import os from .state import PartialState class MultiProcessAdapter(logging.LoggerAdapter): """ An adapter to assist with logging in multiprocess. `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all processes or only the main executed one. Default is `main_process_only=True`. Does not require an `Accelerator` object to be created first. """ @staticmethod def _should_log(main_process_only): "Check if log should be performed" state = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def log(self, level, msg, *args, **kwargs): """ Delegates logger call after checking if we should log. Accepts a new kwarg of `main_process_only`, which will dictate whether it will be logged across all processes or only the main executed one. Default is `True` if not passed Also accepts "in_order", which if `True` makes the processes log one by one, in order. This is much easier to read, but comes at the cost of sometimes needing to wait for the other processes. Default is `False` to not break with the previous behavior. `in_order` is ignored if `main_process_only` is passed. """ if PartialState._shared_state == {}: raise RuntimeError( "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility." ) main_process_only = kwargs.pop("main_process_only", True) in_order = kwargs.pop("in_order", False) # set `stacklevel` to exclude ourself in `Logger.findCaller()` while respecting user's choice kwargs.setdefault("stacklevel", 2) if self.isEnabledFor(level): if self._should_log(main_process_only): msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) elif in_order: state = PartialState() for i in range(state.num_processes): if i == state.process_index: msg, kwargs = self.process(msg, kwargs) self.logger.log(level, msg, *args, **kwargs) state.wait_for_everyone() @functools.lru_cache(None) def warning_once(self, *args, **kwargs): """ This method is identical to `logger.warning()`, but will emit the warning with the same message only once Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to another type of cache that includes the caller frame information in the hashing function. """ self.warning(*args, **kwargs) def get_logger(name: str, log_level: str = None): """ Returns a `logging.Logger` for `name` that can handle multiprocessing. If a log should be called on all processes, pass `main_process_only=False` If a log should be called on all processes and in order, also pass `in_order=True` Args: name (`str`): The name for the logger, such as `__file__` log_level (`str`, *optional*): The log level to use. 
If not passed, will default to the `ACCELERATE_LOG_LEVEL` environment variable, or `INFO` if not set. Example: ```python >>> from accelerate.logging import get_logger >>> from accelerate import Accelerator >>> logger = get_logger(__name__) >>> accelerator = Accelerator() >>> logger.info("My log", main_process_only=False) >>> logger.debug("My log", main_process_only=True) >>> logger = get_logger(__name__, log_level="DEBUG") >>> logger.info("My log") >>> logger.debug("My second log") >>> array = ["a", "b", "c", "d"] >>> letter_at_rank = array[accelerator.process_index] >>> logger.info(letter_at_rank, in_order=True) ``` """ if log_level is None: log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None) logger = logging.getLogger(name) if log_level is not None: logger.setLevel(log_level.upper()) logger.root.setLevel(log_level.upper()) return MultiProcessAdapter(logger, {})
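# --- Editor's note: a short usage sketch appended by the editor, not part of the original
# module. `get_logger` and the `main_process_only` / `in_order` kwargs are defined above.
# As `MultiProcessAdapter.log` requires it, an `Accelerator` (or `PartialState`) must exist
# before logging; the function below is illustrative only and is never called here.
def _logging_usage_sketch():
    from accelerate import Accelerator

    accelerator = Accelerator()  # initializes the process state required by the adapter
    logging.basicConfig(level=logging.INFO)  # ensure records are actually emitted somewhere
    logger = get_logger(__name__, log_level="INFO")
    logger.info("Logged on the main process only")
    logger.info(f"Hello from process {accelerator.process_index}", main_process_only=False)
    logger.info("Logged once per process, in rank order", main_process_only=False, in_order=True)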
accelerate/src/accelerate/logging.py/0
{ "file_path": "accelerate/src/accelerate/logging.py", "repo_id": "accelerate", "token_count": 1842 }
7
#!/usr/bin/env python # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import ( BatchSampler, DataLoader, Dataset, IterableDataset, RandomSampler, TensorDataset, default_collate, ) from accelerate.accelerator import Accelerator, DataLoaderConfiguration from accelerate.utils.dataclasses import DistributedType NUM_ELEMENTS = 22 NUM_WORKERS = 4 BATCH_SIZE = 4 class DummyDataset(Dataset): def __len__(self): return NUM_ELEMENTS def __getitem__(self, index): squeeze = False if isinstance(index, int): index = [index] squeeze = True elif isinstance(index, slice): index = list(range(*index.indices(self.size))) else: index = list(index) batch = [{"index": i, "label": i % 2, "random_augmentation": torch.rand(1).item()} for i in index] if squeeze: batch = batch[0] return batch class DummyIterableDataset(IterableDataset): def __init__(self, data): self.data = data def __iter__(self): yield from self.data def create_accelerator(even_batches=True): dataloader_config = DataLoaderConfiguration(even_batches=even_batches) accelerator = Accelerator(dataloader_config=dataloader_config) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False): """ Create a simple DataLoader to use during the test cases """ if iterable: dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size))) else: dataset = TensorDataset(torch.as_tensor(range(dataset_size))) dl = DataLoader(dataset, batch_size=batch_size) dl = accelerator.prepare(dl) return dl def verify_dataloader_batch_sizes( accelerator: Accelerator, dataset_size: int, batch_size: int, process_0_expected_batch_sizes: List[int], process_1_expected_batch_sizes: List[int], ): """ A helper function for verifying the batch sizes coming from a prepared dataloader in each process """ dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size) batch_sizes = [len(batch[0]) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def test_default_ensures_even_batch_sizes(): accelerator = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( accelerator, dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1, 1], ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 2], ) def test_can_disable_even_batches(): accelerator = create_accelerator(even_batches=False) verify_dataloader_batch_sizes( accelerator, 
dataset_size=3, batch_size=1, process_0_expected_batch_sizes=[1, 1], process_1_expected_batch_sizes=[1], ) verify_dataloader_batch_sizes( accelerator, dataset_size=7, batch_size=2, process_0_expected_batch_sizes=[2, 2], process_1_expected_batch_sizes=[2, 1], ) def test_can_join_uneven_inputs(): accelerator = create_accelerator(even_batches=False) model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) batch_idxs = [] with accelerator.join_uneven_inputs([ddp_model]): for batch_idx, batch in enumerate(dl): output = ddp_model(batch[0].float()) loss = output.sum() loss.backward() batch_idxs.append(batch_idx) accelerator.wait_for_everyone() if accelerator.process_index == 0: assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def test_join_raises_warning_for_non_ddp_distributed(accelerator): with warnings.catch_warnings(record=True) as w: with accelerator.join_uneven_inputs([Mock()]): pass assert issubclass(w[-1].category, UserWarning) assert "only supported for multi-GPU" in str(w[-1].message) def test_join_can_override_even_batches(): default_even_batches = True overridden_even_batches = False accelerator = create_accelerator(even_batches=default_even_batches) model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): train_dl_overridden_value = train_dl.batch_sampler.even_batches valid_dl_overridden_value = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def test_join_can_override_for_mixed_type_dataloaders(): default_even_batches = True overridden_even_batches = False accelerator = create_accelerator(even_batches=default_even_batches) model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1) with warnings.catch_warnings(): warnings.filterwarnings("ignore") try: with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches): batch_dl_overridden_value = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def test_join_raises_warning_for_iterable_when_overriding_even_batches(): accelerator = create_accelerator() model = torch.nn.Linear(1, 1) ddp_model = accelerator.prepare(model) create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True) with warnings.catch_warnings(record=True) as w: with accelerator.join_uneven_inputs([ddp_model], even_batches=False): pass assert issubclass(w[-1].category, UserWarning) assert "only supported for map-style datasets" in str(w[-1].message) def test_data_loader(data_loader, accelerator): # Prepare the DataLoader data_loader = accelerator.prepare(data_loader) all_examples = [] for i, batch in enumerate(data_loader): index, _ = 
accelerator.gather_for_metrics((batch["index"], batch["label"])) all_examples.extend(index.detach().cpu().numpy().tolist()) # Sort the examples sorted_all_examples = sorted(all_examples) # Check if all elements are present in the sorted list of iterated samples assert ( len(set(sorted_all_examples)) == NUM_ELEMENTS ), "Not all the dataset elements have been iterated in an epoch due to duplication of samples across processes." def main(): accelerator = create_accelerator() torch.manual_seed(accelerator.process_index) accelerator.print("Test that even_batches variable ensures uniform batches across processes") test_default_ensures_even_batch_sizes() accelerator.print("Run tests with even_batches disabled") test_can_disable_even_batches() accelerator.print("Test joining uneven inputs") test_can_join_uneven_inputs() accelerator.print("Test overriding even_batches when joining uneven inputs") test_join_can_override_even_batches() accelerator.print("Test overriding even_batches for mixed dataloader types") test_join_can_override_for_mixed_type_dataloaders() accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders") test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("Test join with non DDP distributed raises warning") original_state = accelerator.state.distributed_type accelerator.state.distributed_type = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(accelerator) accelerator.state.distributed_type = original_state dataset = DummyDataset() # Conventional Dataloader with shuffle=False loader = DataLoader(dataset, shuffle=False, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) # Conventional Dataloader with shuffle=True loader = DataLoader(dataset, shuffle=True, batch_size=BATCH_SIZE, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) # Dataloader with batch_sampler sampler = BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) loader = DataLoader(dataset, batch_sampler=sampler, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) # Dataloader with sampler as an instance of `BatchSampler` sampler = BatchSampler(RandomSampler(dataset), batch_size=BATCH_SIZE, drop_last=False) loader = DataLoader(dataset, sampler=sampler, batch_size=None, collate_fn=default_collate, num_workers=NUM_WORKERS) test_data_loader(loader, accelerator) if __name__ == "__main__": main()
accelerate/src/accelerate/test_utils/scripts/test_distributed_data_loop.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_distributed_data_loop.py", "repo_id": "accelerate", "token_count": 4052 }
8
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess import sys import warnings from ast import literal_eval from shutil import which from typing import Any, Dict, List, Tuple import torch from ..commands.config.config_args import SageMakerConfig from ..utils import ( DynamoBackend, PrecisionType, is_ipex_available, is_mlu_available, is_npu_available, is_torch_xla_available, is_xpu_available, ) from ..utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS from ..utils.other import is_port_in_use, merge_dicts from .dataclasses import DistributedType, SageMakerDistributedType def _filter_args(args, parser, default_args=[]): """ Filters out all `accelerate` specific args """ new_args, _ = parser.parse_known_args(default_args) for key, value in vars(args).items(): if key in vars(new_args).keys(): setattr(new_args, key, value) return new_args def _get_mpirun_args(): """ Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs are: OpenMPI, Intel MPI, or MVAPICH. Returns: Program name and arg names for hostfile, num processes, and processes per node """ # Find the MPI program name mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)] if len(mpi_apps) == 0: raise OSError("mpirun or mpiexec were not found. Ensure that Intel MPI, Open MPI, or MVAPICH are installed.") # Call the app with the --version flag to determine which MPI app is installed mpi_app = mpi_apps[0] mpirun_version = subprocess.check_output([mpi_app, "--version"]) if b"Open MPI" in mpirun_version: return mpi_app, "--hostfile", "-n", "--npernode" else: # Intel MPI and MVAPICH both use the same arg names return mpi_app, "-f", "-n", "-ppn" def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct simple launcher environment variables. 
""" cmd = [] if args.no_python and args.module: raise ValueError("--module and --no_python cannot be used together") if args.mpirun_hostfile is not None: mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg = _get_mpirun_args() mpirun_ccl = getattr(args, "mpirun_ccl", None) num_machines = args.num_machines num_processes = getattr(args, "num_processes", None) nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1" cmd += [mpi_app_name, hostfile_arg, args.mpirun_hostfile, proc_per_node_arg, nproc_per_node] if num_processes: cmd += [num_proc_arg, str(num_processes)] if not args.no_python: cmd.append(sys.executable) if args.module: cmd.append("-m") cmd.append(args.training_script) cmd.extend(args.training_script_args) current_env = os.environ.copy() current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu) if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" if args.gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = args.gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids if args.num_machines > 1: current_env["MASTER_ADDR"] = args.main_process_ip current_env["MASTER_PORT"] = str(args.main_process_port) if args.mpirun_hostfile is not None: current_env["CCL_WORKER_COUNT"] = mpirun_ccl elif args.num_processes > 1: current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1" current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500" try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." ) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if is_ipex_available(): current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower() current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower() if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return cmd, current_env def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]: """ Prepares and returns an environment with the correct multi-GPU environment variables. 
""" num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port if num_machines > 1: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." ) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids mixed_precision = args.mixed_precision.lower() try: mixed_precision = PrecisionType(mixed_precision) except ValueError: raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.") current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) if args.use_fsdp: current_env["ACCELERATE_USE_FSDP"] = "true" if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states: raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`") current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy) current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower() current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params) if args.fsdp_auto_wrap_policy is not None: current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy) if args.fsdp_transformer_layer_cls_to_wrap is not None: current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap) if args.fsdp_backward_prefetch_policy is not None: warnings.warn( "`fsdp_backward_prefetch_policy` is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use" " `fsdp_backward_prefetch` instead", FutureWarning, ) args.fsdp_backward_prefetch = args.fsdp_backward_prefetch_policy if args.fsdp_backward_prefetch is not None: current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch) if args.fsdp_state_dict_type is not None: current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type) current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower() current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower() current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower() current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower() if args.use_megatron_lm: prefix = "MEGATRON_LM_" current_env["ACCELERATE_USE_MEGATRON_LM"] = "true" current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree) current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree) current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping) if args.megatron_lm_num_micro_batches is not None: current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches) if args.megatron_lm_sequence_parallelism is not None: current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism) if args.megatron_lm_recompute_activations is not None: current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations) if args.megatron_lm_use_distributed_optimizer is not None: current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return current_env def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct DeepSpeed environment variables. 
""" num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port cmd = None # make sure launcher is not None if args.deepspeed_multinode_launcher is None: # set to default pdsh args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0] if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: cmd = ["deepspeed", "--no_local_rank"] cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)]) if args.deepspeed_exclusion_filter is not None: cmd.extend( [ "--exclude", str(args.deepspeed_exclusion_filter), ] ) elif args.deepspeed_inclusion_filter is not None: cmd.extend( [ "--include", str(args.deepspeed_inclusion_filter), ] ) else: cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)]) if main_process_ip: cmd.extend(["--master_addr", str(main_process_ip)]) cmd.extend(["--master_port", str(main_process_port)]) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: cmd.append("--module") elif args.no_python: cmd.append("--no_python") cmd.append(args.training_script) cmd.extend(args.training_script_args) elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." ) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." 
) current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath(".")) current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower() current_env["ACCELERATE_USE_DEEPSPEED"] = "true" if args.zero_stage is not None: current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage) if args.gradient_accumulation_steps is not None: current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps) if args.gradient_clipping is not None: current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower() if args.offload_optimizer_device is not None: current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower() if args.offload_param_device is not None: current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower() if args.zero3_init_flag is not None: current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower() if args.zero3_save_16bit_model is not None: current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower() if args.deepspeed_config_file is not None: current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file) if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" if args.deepspeed_moe_layer_cls_names is not None: current_env["ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES"] = str(args.deepspeed_moe_layer_cls_names) return cmd, current_env def prepare_tpu( args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False ) -> Tuple[argparse.Namespace, Dict[str, str]]: """ Prepares and returns an environment with the correct TPU environment variables. """ if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True): if args.downcast_bf16: current_env["XLA_DOWNCAST_BF16"] = "1" else: current_env["XLA_USE_BF16"] = "1" if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" if pod: # Take explicit args and set them up for XLA args.vm = args.tpu_vm args.tpu = args.tpu_name return args, current_env def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]: if len(nargs) < 0: return {} # helper function to infer type for argsparser def _infer_type(s): try: s = float(s) if s // 1 == s: return int(s) return s except ValueError: return s parser = argparse.ArgumentParser() _, unknown = parser.parse_known_args(nargs) for index, argument in enumerate(unknown): if argument.startswith(("-", "--")): action = None if index + 1 < len(unknown): # checks if next index would be in list if unknown[index + 1].startswith(("-", "--")): # checks if next element is an key # raise an error if element is store_true or store_false raise ValueError( "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types" ) else: # raise an error if last element is store_true or store_false raise ValueError( "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. 
Please define explicit types" ) # adds argument to parser based on action_store true if action is None: parser.add_argument(argument, type=_infer_type) else: parser.add_argument(argument, action=action) return { key: (literal_eval(value) if value in ("True", "False") else value) for key, value in parser.parse_args(nargs).__dict__.items() } def prepare_sagemager_args_inputs( sagemaker_config: SageMakerConfig, args: argparse.Namespace ) -> Tuple[argparse.Namespace, Dict[str, Any]]: # configure environment print("Configuring Amazon SageMaker environment") os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region # configure credentials if sagemaker_config.profile is not None: os.environ["AWS_PROFILE"] = sagemaker_config.profile elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None: os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key else: raise OSError("You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile") # extract needed arguments source_dir = os.path.dirname(args.training_script) if not source_dir: # checks if string is empty source_dir = "." entry_point = os.path.basename(args.training_script) if not entry_point.endswith(".py"): raise ValueError(f'Your training script should be a python script and not "{entry_point}"') print("Converting Arguments to Hyperparameters") hyperparameters = _convert_nargs_to_dict(args.training_script_args) try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
) # Environment variables to be set for use during training job environment = { "ACCELERATE_USE_SAGEMAKER": "true", "ACCELERATE_MIXED_PRECISION": str(mixed_precision), "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value, "ACCELERATE_DYNAMO_MODE": args.dynamo_mode, "ACCELERATE_DYNAMO_USE_FULLGRAPH": str(args.dynamo_use_fullgraph), "ACCELERATE_DYNAMO_USE_DYNAMIC": str(args.dynamo_use_dynamic), "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value, } # configure distribution set up distribution = None if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL: distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} # configure sagemaker inputs sagemaker_inputs = None if sagemaker_config.sagemaker_inputs_file is not None: print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file") sagemaker_inputs = {} with open(sagemaker_config.sagemaker_inputs_file) as file: for i, line in enumerate(file): if i == 0: continue l = line.split("\t") sagemaker_inputs[l[0]] = l[1].strip() print(f"Loaded SageMaker Inputs: {sagemaker_inputs}") # configure sagemaker metrics sagemaker_metrics = None if sagemaker_config.sagemaker_metrics_file is not None: print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file") sagemaker_metrics = [] with open(sagemaker_config.sagemaker_metrics_file) as file: for i, line in enumerate(file): if i == 0: continue l = line.split("\t") metric_dict = { "Name": l[0], "Regex": l[1].strip(), } sagemaker_metrics.append(metric_dict) print(f"Loaded SageMaker Metrics: {sagemaker_metrics}") # configure session print("Creating Estimator") args = { "image_uri": sagemaker_config.image_uri, "entry_point": entry_point, "source_dir": source_dir, "role": sagemaker_config.iam_role_name, "transformers_version": sagemaker_config.transformers_version, "pytorch_version": sagemaker_config.pytorch_version, "py_version": sagemaker_config.py_version, "base_job_name": sagemaker_config.base_job_name, "instance_count": sagemaker_config.num_machines, "instance_type": sagemaker_config.ec2_instance_type, "debugger_hook_config": False, "distribution": distribution, "hyperparameters": hyperparameters, "environment": environment, "metric_definitions": sagemaker_metrics, } if sagemaker_config.additional_args is not None: args = merge_dicts(sagemaker_config.additional_args, args) return args, sagemaker_inputs def env_var_path_add(env_var_name, path_to_add): """ Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the caller to set it in os.environ. """ paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0] paths.append(str(path_to_add)) return ":".join(paths) class PrepareForLaunch: """ Prepare a function that will launched in a distributed setup. Args: launcher (`Callable`): The function to launch. distributed_type ([`~state.DistributedType`]): The distributed type to prepare for. debug (`bool`, *optional*, defaults to `False`): Whether or not this is a debug launch. 
""" def __init__(self, launcher, distributed_type="NO", debug=False): self.launcher = launcher self.distributed_type = DistributedType(distributed_type) self.debug = debug def __call__(self, index, *args): if self.debug: world_size = int(os.environ.get("WORLD_SIZE")) rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE") torch.distributed.init_process_group( "gloo", rank=index, store=torch.distributed.FileStore(rdv_file, world_size), world_size=world_size, ) elif self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, ): # Prepare the environment for torch.distributed os.environ["LOCAL_RANK"] = str(index) nproc = int(os.environ.get("NPROC", 1)) node_rank = int(os.environ.get("NODE_RANK", 0)) os.environ["RANK"] = str(nproc * node_rank + index) os.environ["FORK_LAUNCHED"] = str(1) self.launcher(*args)
accelerate/src/accelerate/utils/launch.py/0
{ "file_path": "accelerate/src/accelerate/utils/launch.py", "repo_id": "accelerate", "token_count": 11971 }
9
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, get_launch_command, path_in_accelerate_package, require_fsdp, require_multi_device, require_non_cpu, require_non_torch_xla, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) BERT_BASE_CASED = "bert-base-cased" FP16 = "fp16" BF16 = "bf16" dtypes = [FP16, BF16] @require_fsdp @require_non_cpu @require_non_torch_xla class FSDPPluginIntegration(AccelerateTestCase): def setUp(self): super().setUp() self.dist_env = dict( ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", ) def test_sharding_strategy(self): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy # check that giving enums works fine for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): env = self.dist_env.copy() env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) # check that giving names works fine for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): env = self.dist_env.copy() env["FSDP_SHARDING_STRATEGY"] = strategy with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) def test_backward_prefetch(self): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH): env = self.dist_env.copy() env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": assert fsdp_plugin.backward_prefetch is None else: assert fsdp_plugin.backward_prefetch == BackwardPrefetch(i + 1) def test_state_dict_type(self): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE): env = self.dist_env.copy() env["FSDP_STATE_DICT_TYPE"] = state_dict_type with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.state_dict_type == StateDictType(i + 1) if state_dict_type == "FULL_STATE_DICT": assert fsdp_plugin.state_dict_config.offload_to_cpu assert fsdp_plugin.state_dict_config.rank0_only def test_auto_wrap_policy(self): model = AutoModel.from_pretrained(BERT_BASE_CASED) for policy in 
FSDP_AUTO_WRAP_POLICY: env = self.dist_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = policy if policy == "TRANSFORMER_BASED_WRAP": env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer" elif policy == "SIZE_BASED_WRAP": env["FSDP_MIN_NUM_PARAMS"] = "2000" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(model) if policy == "NO_WRAP": assert fsdp_plugin.auto_wrap_policy is None else: assert fsdp_plugin.auto_wrap_policy is not None env = self.dist_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP" env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() with self.assertRaises(Exception) as cm: fsdp_plugin.set_auto_wrap_policy(model) assert "Could not find the transformer layer class to wrap in the model." in str(cm.exception) env = self.dist_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP" env["FSDP_MIN_NUM_PARAMS"] = "0" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(model) assert fsdp_plugin.auto_wrap_policy is None def test_mixed_precision(self): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: env = self.dist_env.copy() env["ACCELERATE_MIXED_PRECISION"] = mp_dtype with mockenv_context(**env): accelerator = Accelerator() if mp_dtype == "fp16": dtype = torch.float16 elif mp_dtype == "bf16": dtype = torch.bfloat16 mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy if mp_dtype == FP16: assert isinstance(accelerator.scaler, ShardedGradScaler) elif mp_dtype == BF16: assert accelerator.scaler is None AcceleratorState._reset_state(True) def test_mixed_precision_buffer_autocast_override(self): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: env = self.dist_env.copy() env["ACCELERATE_MIXED_PRECISION"] = mp_dtype with mockenv_context(**env): accelerator = Accelerator() if mp_dtype == "fp16": dtype = torch.float16 elif mp_dtype == "bf16": dtype = torch.bfloat16 mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=torch.float32) accelerator.state.fsdp_plugin.set_mixed_precision(dtype, buffer_autocast=True, override=True) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy if mp_dtype == FP16: assert isinstance(accelerator.scaler, ShardedGradScaler) elif mp_dtype == BF16: assert accelerator.scaler is None AcceleratorState._reset_state(True) def test_cpu_offload(self): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: env = self.dist_env.copy() env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower() with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.cpu_offload == CPUOffload(offload_params=flag) # Skip this test when TorchXLA is available because accelerate.launch does not support TorchXLA FSDP. 
@require_non_torch_xla @require_fsdp @require_multi_device @slow class FSDPIntegrationTest(TempDirTestCase): test_scripts_folder = path_in_accelerate_package("test_utils", "scripts", "external_deps") def setUp(self): super().setUp() self.performance_lower_bound = 0.82 self.performance_configs = [ "fsdp_shard_grad_op_transformer_based_wrap", "fsdp_full_shard_transformer_based_wrap", ] self.peak_memory_usage_upper_bound = { "multi_gpu_fp16": 3200, "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000, "fsdp_full_shard_transformer_based_wrap_fp16": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } self.n_train = 160 self.n_val = 160 def test_performance(self): self.test_file_path = self.test_scripts_folder / "test_performance.py" cmd = get_launch_command(num_processes=2, num_machines=1, machine_rank=0, use_fsdp=True) for config in self.performance_configs: cmd_config = cmd.copy() for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): if strategy.lower() in config: cmd_config.append(f"--fsdp_sharding_strategy={strategy}") break if "fp32" in config: cmd_config.append("--mixed_precision=no") else: cmd_config.append("--mixed_precision=fp16") if "cpu_offload" in config: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) def test_checkpointing(self): self.test_file_path = self.test_scripts_folder / "test_checkpointing.py" cmd = get_launch_command( num_processes=2, num_machines=1, machine_rank=0, use_fsdp=True, mixed_precision="fp16", fsdp_transformer_layer_cls_to_wrap="BertLayer", ) for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): cmd_config = cmd.copy() cmd_config.append(f"--fsdp_sharding_strategy={strategy}") if strategy != "FULL_SHARD": continue state_dict_config_index = len(cmd_config) for state_dict_type in FSDP_STATE_DICT_TYPE: # Todo: Currently failing for `LOCAL_STATE_DICT` with error # Unexpected key(s) in state_dict: "_fsdp_wrapped_module._flat_param". 
if state_dict_type == "LOCAL_STATE_DICT": continue cmd_config = cmd_config[:state_dict_config_index] cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", "--partial_train_epoch=1", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) cmd_config = cmd_config[:-1] resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0") cmd_config.extend( [ f"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) def test_peak_memory_usage(self): self.test_file_path = self.test_scripts_folder / "test_peak_memory_usage.py" cmd = get_launch_command(num_processes=2, num_machines=1, machine_rank=0) for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): cmd_config = cmd.copy() if "fp16" in spec: cmd_config.extend(["--mixed_precision=fp16"]) else: cmd_config.extend(["--mixed_precision=no"]) if "multi_gpu" in spec: continue else: cmd_config.extend(["--use_fsdp"]) for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): if strategy.lower() in spec: cmd_config.append(f"--fsdp_sharding_strategy={strategy}") break if "cpu_offload" in spec: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--peak_memory_upper_bound={peak_mem_upper_bound}", f"--n_train={self.n_train}", f"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config)
accelerate/tests/fsdp/test_fsdp.py/0
{ "file_path": "accelerate/tests/fsdp/test_fsdp.py", "repo_id": "accelerate", "token_count": 7367 }
10
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import ( DEFAULT_LAUNCH_COMMAND, execute_subprocess_async, require_multi_device, require_non_cpu, require_non_xpu, ) from accelerate.utils import AutocastKwargs, KwargsHandler, TorchDynamoPlugin, clear_environment from accelerate.utils.dataclasses import DistributedType @dataclass class MockClass(KwargsHandler): a: int = 0 b: bool = False c: float = 3.0 class KwargsHandlerTester(unittest.TestCase): def test_kwargs_handler(self): # If no defaults are changed, `to_kwargs` returns an empty dict. assert MockClass().to_kwargs() == {} assert MockClass(a=2).to_kwargs() == {"a": 2} assert MockClass(a=2, b=True).to_kwargs() == {"a": 2, "b": True} assert MockClass(a=2, c=2.25).to_kwargs() == {"a": 2, "c": 2.25} @require_non_cpu @require_non_xpu def test_grad_scaler_kwargs(self): # If no defaults are changed, `to_kwargs` returns an empty dict. scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2) AcceleratorState._reset_state() accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler]) print(accelerator.use_fp16) scaler = accelerator.scaler # Check the kwargs have been applied assert scaler._init_scale == 1024.0 assert scaler._growth_factor == 2.0 # Check the other values are at the default assert scaler._backoff_factor == 0.5 assert scaler._growth_interval == 2000 assert scaler._enabled is True @require_multi_device def test_ddp_kwargs(self): cmd = DEFAULT_LAUNCH_COMMAND + [inspect.getfile(self.__class__)] execute_subprocess_async(cmd) @require_non_cpu def test_autocast_kwargs(self): kwargs = AutocastKwargs(enabled=False) AcceleratorState._reset_state() accelerator = Accelerator(mixed_precision="fp16") a_float32 = torch.rand((8, 8), device=accelerator.device) b_float32 = torch.rand((8, 8), device=accelerator.device) c_float32 = torch.rand((8, 8), device=accelerator.device) d_float32 = torch.rand((8, 8), device=accelerator.device) with accelerator.autocast(): e_float16 = torch.mm(a_float32, b_float32) assert e_float16.dtype == torch.float16 with accelerator.autocast(autocast_handler=kwargs): # Convert e_float16 to float32 f_float32 = torch.mm(c_float32, e_float16.float()) assert f_float32.dtype == torch.float32 g_float16 = torch.mm(d_float32, f_float32) # We should be back in fp16 assert g_float16.dtype == torch.float16 def test_torch_dynamo_plugin(self): with clear_environment(): prefix = "ACCELERATE_DYNAMO_" # nvfuser's dynamo backend name is "nvprims_nvfuser" # use "nvfuser" here to cause exception if this test causes os.environ changed permanently os.environ[prefix + "BACKEND"] = "aot_ts_nvfuser" os.environ[prefix + "MODE"] = "reduce-overhead" dynamo_plugin_kwargs = TorchDynamoPlugin().to_kwargs() assert dynamo_plugin_kwargs == 
{"backend": "aot_ts_nvfuser", "mode": "reduce-overhead"} assert os.environ.get(prefix + "BACKEND") != "aot_ts_nvfuser" def main(): ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) accelerator = Accelerator(kwargs_handlers=[ddp_scaler]) # Skip this test due to TorchXLA not using torch.nn.parallel.DistributedDataParallel for model wrapping. if accelerator.distributed_type == DistributedType.XLA: return model = torch.nn.Linear(100, 200) model = accelerator.prepare(model) # Check the values changed in kwargs error_msg = "" observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg) if __name__ == "__main__": main()
accelerate/tests/test_kwargs_handlers.py/0
{ "file_path": "accelerate/tests/test_kwargs_handlers.py", "repo_id": "accelerate", "token_count": 2247 }
11
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import json import logging import os import re import subprocess import tempfile import unittest import zipfile from pathlib import Path from typing import Optional from unittest import mock import numpy as np import torch # We use TF to parse the logs from accelerate import Accelerator from accelerate.test_utils.testing import ( MockingTestCase, TempDirTestCase, require_clearml, require_comet_ml, require_dvclive, require_pandas, require_tensorboard, require_wandb, skip, ) from accelerate.tracking import CometMLTracker, GeneralTracker from accelerate.utils import ( ProjectConfiguration, is_comet_ml_available, is_dvclive_available, is_tensorboard_available, ) if is_comet_ml_available(): from comet_ml import OfflineExperiment if is_tensorboard_available(): import struct import tensorboard.compat.proto.event_pb2 as event_pb2 if is_dvclive_available(): from dvclive.plots.metric import Metric from dvclive.serialize import load_yaml from dvclive.utils import parse_metrics logger = logging.getLogger(__name__) @require_tensorboard class TensorBoardTrackingTest(unittest.TestCase): def test_init_trackers(self): project_name = "test_project_with_config" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath) config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} accelerator.init_trackers(project_name, config) accelerator.end_training() for child in Path(f"{dirpath}/{project_name}").glob("*/**"): log = list(filter(lambda x: x.is_file(), child.iterdir()))[0] assert str(log) != "" def test_log(self): project_name = "test_project_with_log" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath) accelerator.init_trackers(project_name) values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord # Names are randomly generated each time log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0] assert str(log) != "" def test_log_with_tensor(self): project_name = "test_project_with_log" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="tensorboard", project_dir=dirpath) accelerator.init_trackers(project_name) values = {"tensor": torch.tensor(1)} accelerator.log(values, step=0) accelerator.end_training() # Logged values are stored in the outermost-tfevents file and can be read in as a TFRecord # Names are randomly generated each time log = list(filter(lambda x: x.is_file(), Path(f"{dirpath}/{project_name}").iterdir()))[0] # Reading implementation based on https://github.com/pytorch/pytorch/issues/45327#issuecomment-703757685 with open(log, "rb") as f: data = f.read() found_tensor = False while data: header = 
struct.unpack("Q", data[:8]) event_str = data[12 : 12 + int(header[0])] # 8+4 data = data[12 + int(header[0]) + 4 :] event = event_pb2.Event() event.ParseFromString(event_str) if event.HasField("summary"): for value in event.summary.value: if value.simple_value == 1.0 and value.tag == "tensor": found_tensor = True assert found_tensor, "Converted tensor was not found in the log file!" def test_project_dir(self): with self.assertRaisesRegex(ValueError, "Logging with `tensorboard` requires a `logging_dir`"): _ = Accelerator(log_with="tensorboard") with tempfile.TemporaryDirectory() as dirpath: _ = Accelerator(log_with="tensorboard", project_dir=dirpath) def test_project_dir_with_config(self): config = ProjectConfiguration(total_limit=30) with tempfile.TemporaryDirectory() as dirpath: _ = Accelerator(log_with="tensorboard", project_dir=dirpath, project_config=config) @require_wandb @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"}) class WandBTrackingTest(TempDirTestCase, MockingTestCase): def setUp(self): super().setUp() # wandb let's us override where logs are stored to via the WANDB_DIR env var self.add_mocks(mock.patch.dict(os.environ, {"WANDB_DIR": self.tmpdir})) @staticmethod def parse_log(log: str, section: str, record: bool = True): """ Parses wandb log for `section` and returns a dictionary of all items in that section. Section names are based on the output of `wandb sync --view --verbose` and items starting with "Record" in that result """ # Big thanks to the W&B team for helping us parse their logs pattern = rf"{section} ([\S\s]*?)\n\n" if record: pattern = rf"Record: {pattern}" cleaned_record = re.findall(pattern, log)[0] # A config if section == "config" or section == "history": cleaned_record = re.findall(r'"([a-zA-Z0-9_.,]+)', cleaned_record) return {key: val for key, val in zip(cleaned_record[0::2], cleaned_record[1::2])} # Everything else else: return dict(re.findall(r'(\w+): "([^\s]+)"', cleaned_record)) @skip def test_wandb(self): project_name = "test_project_with_config" accelerator = Accelerator(log_with="wandb") config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} kwargs = {"wandb": {"tags": ["my_tag"]}} accelerator.init_trackers(project_name, config, kwargs) values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() # The latest offline log is stored at wandb/latest-run/*.wandb for child in Path(f"{self.tmpdir}/wandb/latest-run").glob("*"): if child.is_file() and child.suffix == ".wandb": cmd = ["wandb", "sync", "--view", "--verbose", str(child)] content = subprocess.check_output(cmd, encoding="utf8", errors="ignore") break # Check HPS through careful parsing and cleaning logged_items = self.parse_log(content, "config") assert logged_items["num_iterations"] == "12" assert logged_items["learning_rate"] == "0.01" assert logged_items["some_boolean"] == "false" assert logged_items["some_string"] == "some_value" assert logged_items["some_string"] == "some_value" # Run tags logged_items = self.parse_log(content, "run", False) assert logged_items["tags"] == "my_tag" # Actual logging logged_items = self.parse_log(content, "history") assert logged_items["total_loss"] == "0.1" assert logged_items["iteration"] == "1" assert logged_items["my_text"] == "some_value" assert logged_items["_step"] == "0" # Comet has a special `OfflineExperiment` we need to use for testing def offline_init(self, run_name: str, tmpdir: str): self.run_name = run_name self.writer = 
OfflineExperiment(project_name=run_name, offline_directory=tmpdir) logger.info(f"Initialized offline CometML project {self.run_name}") logger.info("Make sure to log any initial configurations with `self.store_init_configuration` before training!") @require_comet_ml @mock.patch.object(CometMLTracker, "__init__", offline_init) class CometMLTest(unittest.TestCase): @staticmethod def get_value_from_key(log_list, key: str, is_param: bool = False): "Extracts `key` from Comet `log`" for log in log_list: j = json.loads(log)["payload"] if is_param and "param" in j.keys(): if j["param"]["paramName"] == key: return j["param"]["paramValue"] if "log_other" in j.keys(): if j["log_other"]["key"] == key: return j["log_other"]["val"] if "metric" in j.keys(): if j["metric"]["metricName"] == key: return j["metric"]["metricValue"] def test_init_trackers(self): with tempfile.TemporaryDirectory() as d: tracker = CometMLTracker("test_project_with_config", d) accelerator = Accelerator(log_with=tracker) config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} accelerator.init_trackers(None, config) accelerator.end_training() log = os.listdir(d)[0] # Comet is nice, it's just a zip file here # We parse the raw logs p = os.path.join(d, log) archive = zipfile.ZipFile(p, "r") log = archive.open("messages.json").read().decode("utf-8") list_of_json = log.split("\n")[:-1] assert self.get_value_from_key(list_of_json, "num_iterations", True) == 12 assert self.get_value_from_key(list_of_json, "learning_rate", True) == 0.01 assert self.get_value_from_key(list_of_json, "some_boolean", True) is False assert self.get_value_from_key(list_of_json, "some_string", True) == "some_value" def test_log(self): with tempfile.TemporaryDirectory() as d: tracker = CometMLTracker("test_project_with_config", d) accelerator = Accelerator(log_with=tracker) accelerator.init_trackers(None) values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() log = os.listdir(d)[0] # Comet is nice, it's just a zip file here # We parse the raw logs p = os.path.join(d, log) archive = zipfile.ZipFile(p, "r") log = archive.open("messages.json").read().decode("utf-8") list_of_json = log.split("\n")[:-1] assert self.get_value_from_key(list_of_json, "curr_step", True) == 0 assert self.get_value_from_key(list_of_json, "total_loss") == 0.1 assert self.get_value_from_key(list_of_json, "iteration") == 1 assert self.get_value_from_key(list_of_json, "my_text") == "some_value" @require_clearml class ClearMLTest(TempDirTestCase, MockingTestCase): def setUp(self): super().setUp() # ClearML offline session location is stored in CLEARML_CACHE_DIR self.add_mocks(mock.patch.dict(os.environ, {"CLEARML_CACHE_DIR": self.tmpdir})) @staticmethod def _get_offline_dir(accelerator): from clearml.config import get_offline_dir return get_offline_dir(task_id=accelerator.get_tracker("clearml", unwrap=True).id) @staticmethod def _get_metrics(offline_dir): metrics = [] with open(os.path.join(offline_dir, "metrics.jsonl")) as f: json_lines = f.readlines() for json_line in json_lines: metrics.extend(json.loads(json_line)) return metrics def test_init_trackers(self): from clearml import Task from clearml.utilities.config import text_to_config_dict Task.set_offline(True) accelerator = Accelerator(log_with="clearml") config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} accelerator.init_trackers("test_project_with_config", 
config) offline_dir = ClearMLTest._get_offline_dir(accelerator) accelerator.end_training() with open(os.path.join(offline_dir, "task.json")) as f: offline_session = json.load(f) clearml_offline_config = text_to_config_dict(offline_session["configuration"]["General"]["value"]) assert config == clearml_offline_config def test_log(self): from clearml import Task Task.set_offline(True) accelerator = Accelerator(log_with="clearml") accelerator.init_trackers("test_project_with_log") values_with_iteration = {"should_be_under_train": 1, "eval_value": 2, "test_value": 3.1, "train_value": 4.1} accelerator.log(values_with_iteration, step=1) single_values = {"single_value_1": 1.1, "single_value_2": 2.2} accelerator.log(single_values) offline_dir = ClearMLTest._get_offline_dir(accelerator) accelerator.end_training() metrics = ClearMLTest._get_metrics(offline_dir) assert (len(values_with_iteration) + len(single_values)) == len(metrics) for metric in metrics: if metric["metric"] == "Summary": assert metric["variant"] in single_values assert metric["value"] == single_values[metric["variant"]] elif metric["metric"] == "should_be_under_train": assert metric["variant"] == "train" assert metric["iter"] == 1 assert metric["value"] == values_with_iteration["should_be_under_train"] else: values_with_iteration_key = metric["variant"] + "_" + metric["metric"] assert values_with_iteration_key in values_with_iteration assert metric["iter"] == 1 assert metric["value"] == values_with_iteration[values_with_iteration_key] def test_log_images(self): from clearml import Task Task.set_offline(True) accelerator = Accelerator(log_with="clearml") accelerator.init_trackers("test_project_with_log_images") base_image = np.eye(256, 256, dtype=np.uint8) * 255 base_image_3d = np.concatenate((np.atleast_3d(base_image), np.zeros((256, 256, 2), dtype=np.uint8)), axis=2) images = { "base_image": base_image, "base_image_3d": base_image_3d, } accelerator.get_tracker("clearml").log_images(images, step=1) offline_dir = ClearMLTest._get_offline_dir(accelerator) accelerator.end_training() images_saved = Path(os.path.join(offline_dir, "data")).rglob("*.jpeg") assert len(list(images_saved)) == len(images) def test_log_table(self): from clearml import Task Task.set_offline(True) accelerator = Accelerator(log_with="clearml") accelerator.init_trackers("test_project_with_log_table") accelerator.get_tracker("clearml").log_table( "from lists with columns", columns=["A", "B", "C"], data=[[1, 3, 5], [2, 4, 6]] ) accelerator.get_tracker("clearml").log_table("from lists", data=[["A2", "B2", "C2"], [7, 9, 11], [8, 10, 12]]) offline_dir = ClearMLTest._get_offline_dir(accelerator) accelerator.end_training() metrics = ClearMLTest._get_metrics(offline_dir) assert len(metrics) == 2 for metric in metrics: assert metric["metric"] in ("from lists", "from lists with columns") plot = json.loads(metric["plot_str"]) if metric["metric"] == "from lists with columns": print(plot["data"][0]) self.assertCountEqual(plot["data"][0]["header"]["values"], ["A", "B", "C"]) self.assertCountEqual(plot["data"][0]["cells"]["values"], [[1, 2], [3, 4], [5, 6]]) else: self.assertCountEqual(plot["data"][0]["header"]["values"], ["A2", "B2", "C2"]) self.assertCountEqual(plot["data"][0]["cells"]["values"], [[7, 8], [9, 10], [11, 12]]) @require_pandas def test_log_table_pandas(self): import pandas as pd from clearml import Task Task.set_offline(True) accelerator = Accelerator(log_with="clearml") accelerator.init_trackers("test_project_with_log_table_pandas") 
accelerator.get_tracker("clearml").log_table( "from df", dataframe=pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]}), step=1 ) offline_dir = ClearMLTest._get_offline_dir(accelerator) accelerator.end_training() metrics = ClearMLTest._get_metrics(offline_dir) assert len(metrics) == 1 assert metrics[0]["metric"] == "from df" plot = json.loads(metrics[0]["plot_str"]) self.assertCountEqual(plot["data"][0]["header"]["values"], [["A"], ["B"], ["C"]]) self.assertCountEqual(plot["data"][0]["cells"]["values"], [[1, 2], [3, 4], [5, 6]]) class MyCustomTracker(GeneralTracker): "Basic tracker that writes to a csv for testing" _col_names = [ "total_loss", "iteration", "my_text", "learning_rate", "num_iterations", "some_boolean", "some_string", ] name = "my_custom_tracker" requires_logging_directory = False def __init__(self, dir: str): self.f = open(f"{dir}/log.csv", "w+") self.writer = csv.DictWriter(self.f, fieldnames=self._col_names) self.writer.writeheader() @property def tracker(self): return self.writer def store_init_configuration(self, values: dict): logger.info("Call init") self.writer.writerow(values) def log(self, values: dict, step: Optional[int]): logger.info("Call log") self.writer.writerow(values) def finish(self): self.f.close() class CustomTrackerTestCase(unittest.TestCase): def test_init_trackers(self): with tempfile.TemporaryDirectory() as d: tracker = MyCustomTracker(d) accelerator = Accelerator(log_with=tracker) config = {"num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value"} accelerator.init_trackers("Some name", config) accelerator.end_training() with open(f"{d}/log.csv") as f: data = csv.DictReader(f) data = next(data) truth = { "total_loss": "", "iteration": "", "my_text": "", "learning_rate": "0.01", "num_iterations": "12", "some_boolean": "False", "some_string": "some_value", } assert data == truth def test_log(self): with tempfile.TemporaryDirectory() as d: tracker = MyCustomTracker(d) accelerator = Accelerator(log_with=tracker) accelerator.init_trackers("Some name") values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} accelerator.log(values, step=0) accelerator.end_training() with open(f"{d}/log.csv") as f: data = csv.DictReader(f) data = next(data) truth = { "total_loss": "0.1", "iteration": "1", "my_text": "some_value", "learning_rate": "", "num_iterations": "", "some_boolean": "", "some_string": "", } assert data == truth @require_dvclive @mock.patch("dvclive.live.get_dvc_repo", return_value=None) class DVCLiveTrackingTest(unittest.TestCase): def test_init_trackers(self, mock_repo): project_name = "test_project_with_config" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="dvclive") config = { "num_iterations": 12, "learning_rate": 1e-2, "some_boolean": False, "some_string": "some_value", } init_kwargs = {"dvclive": {"dir": dirpath, "save_dvc_exp": False, "dvcyaml": None}} accelerator.init_trackers(project_name, config, init_kwargs) accelerator.end_training() live = accelerator.trackers[0].live params = load_yaml(live.params_file) assert params == config def test_log(self, mock_repo): project_name = "test_project_with_log" with tempfile.TemporaryDirectory() as dirpath: accelerator = Accelerator(log_with="dvclive", project_dir=dirpath) init_kwargs = {"dvclive": {"dir": dirpath, "save_dvc_exp": False, "dvcyaml": None}} accelerator.init_trackers(project_name, init_kwargs=init_kwargs) values = {"total_loss": 0.1, "iteration": 1, "my_text": "some_value"} # Log step 0 
accelerator.log(values) # Log step 1 accelerator.log(values) # Log step 3 (skip step 2) accelerator.log(values, step=3) accelerator.end_training() live = accelerator.trackers[0].live logs, latest = parse_metrics(live) assert latest.pop("step") == 3 assert latest == values scalars = os.path.join(live.plots_dir, Metric.subfolder) for val in values.keys(): val_path = os.path.join(scalars, f"{val}.tsv") steps = [int(row["step"]) for row in logs[val_path]] assert steps == [0, 1, 3]
accelerate/tests/test_tracking.py/0
{ "file_path": "accelerate/tests/test_tracking.py", "repo_id": "accelerate", "token_count": 10034 }
12
# Model arguments model_name_or_path: alignment-handbook/mistral-7b-sft-constitutional-ai torch_dtype: null # Data training arguments # For definitions, see: src/h4/training/config.py dataset_mixer: HuggingFaceH4/ultrafeedback_binarized: 1.0 HuggingFaceH4/cai-conversation-harmless: 1.0 dataset_splits: - train_prefs - test_prefs preprocessing_num_workers: 12 # DPOTrainer arguments bf16: true beta: 0.1 do_eval: true do_train: true evaluation_strategy: steps eval_steps: 1000 gradient_accumulation_steps: 1 gradient_checkpointing: true hub_model_id: mistral-7b-dpo-constitutional-ai learning_rate: 5.0e-7 log_level: info logging_steps: 10 lr_scheduler_type: linear max_length: 1024 max_prompt_length: 512 num_train_epochs: 3 optim: rmsprop output_dir: data/mistral-7b-dpo-constitutional-ai per_device_train_batch_size: 2 per_device_eval_batch_size: 8 push_to_hub: true save_strategy: "steps" save_steps: 100 save_total_limit: 1 seed: 42 warmup_ratio: 0.1
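This recipe is consumed by the alignment-handbook training scripts (via `H4ArgumentParser`), but it can be useful to load it in Python to inspect or tweak hyperparameters before launching. The sketch below is illustrative only; the repo-relative path is an assumption, and `config_smoke_test.yaml` is a made-up output name.

```python
# Illustrative sketch: inspect/override the DPO recipe above with PyYAML.
# The alignment-handbook normally parses this YAML itself inside its training scripts.
import yaml

recipe_path = "recipes/constitutional-ai/dpo/config_anthropic.yaml"  # assumed repo-relative path

with open(recipe_path) as f:
    recipe = yaml.safe_load(f)

print(recipe["model_name_or_path"])  # alignment-handbook/mistral-7b-sft-constitutional-ai
print(recipe["beta"], recipe["learning_rate"], recipe["num_train_epochs"])

# Example override: a short smoke-test run before committing to 3 full epochs.
recipe["num_train_epochs"] = 1
with open("config_smoke_test.yaml", "w") as f:
    yaml.safe_dump(recipe, f)
```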
alignment-handbook/recipes/constitutional-ai/dpo/config_anthropic.yaml/0
{ "file_path": "alignment-handbook/recipes/constitutional-ai/dpo/config_anthropic.yaml", "repo_id": "alignment-handbook", "token_count": 373 }
13
# Model arguments model_name_or_path: mistral-community/Mixtral-8x22B-v0.1 model_revision: main torch_dtype: bfloat16 use_flash_attention_2: true # Data training arguments chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" dataset_mixer: argilla/distilabel-capybara-dpo-7k-binarized: 1.0 dataset_splits: - train preprocessing_num_workers: 8 # ORPOTrainer arguments bf16: true beta: 0.05 gradient_accumulation_steps: 1 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: true hub_model_id: zephyr-orpo-141b-A35b learning_rate: 5.0e-6 log_level: info logging_steps: 10 lr_scheduler_type: inverse_sqrt max_length: 2048 max_prompt_length: 1792 num_train_epochs: 3 optim: adamw_bnb_8bit output_dir: data/zephyr-orpo-141b-A35b per_device_train_batch_size: 1 push_to_hub: true report_to: - tensorboard - wandb save_strategy: "no" seed: 42 warmup_steps: 100
alignment-handbook/recipes/zephyr-141b-A35b/orpo/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/zephyr-141b-A35b/orpo/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 519 }
14
__version__ = "0.3.0.dev0" from .configs import DataArguments, DPOConfig, H4ArgumentParser, ModelArguments, SFTConfig from .data import apply_chat_template, get_datasets from .decontaminate import decontaminate_humaneval from .model_utils import ( get_checkpoint, get_kbit_device_map, get_peft_config, get_quantization_config, get_tokenizer, is_adapter_model, )
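These exports are the public surface of the `alignment` package. A rough sketch of how they are typically combined in a handbook-style SFT script is shown below; the exact dataclass combination and keyword arguments are assumptions for illustration and may differ from the real training scripts.

```python
# Rough sketch (assumptions noted above): typical use of the alignment exports.
from alignment import (
    DataArguments,
    H4ArgumentParser,
    ModelArguments,
    SFTConfig,
    apply_chat_template,
    get_datasets,
    get_tokenizer,
)

# Parse a YAML recipe (plus CLI overrides) into the three config dataclasses.
parser = H4ArgumentParser((ModelArguments, DataArguments, SFTConfig))
model_args, data_args, training_args = parser.parse()

raw_datasets = get_datasets(data_args, splits=data_args.dataset_splits)
tokenizer = get_tokenizer(model_args, data_args)

# Render each example with the model's chat template before tokenization.
raw_datasets = raw_datasets.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer, "task": "sft"},
)
```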
alignment-handbook/src/alignment/__init__.py/0
{ "file_path": "alignment-handbook/src/alignment/__init__.py", "repo_id": "alignment-handbook", "token_count": 148 }
15
task: dreambooth base_model: runwayml/stable-diffusion-v1-5 project_name: autotrain-sd15-finetuned backend: local data: path: data/ # store all images in this folder prompt: photo of sks person # prompt for the model params: resolution: 512 batch_size: 1 num_steps: 500 lr: 1e-4 gradient_accumulation: 4 mixed_precision: fp16 train_text_encoder: false xformers: false use_8bit_adam: false hub: username: ${HF_USERNAME} token: ${HF_TOKEN} push_to_hub: true
autotrain-advanced/configs/dreambooth/sd15_colab.yml/0
{ "file_path": "autotrain-advanced/configs/dreambooth/sd15_colab.yml", "repo_id": "autotrain-advanced", "token_count": 185 }
16
# AutoTrain API

With the AutoTrain API, you can run your own instance of AutoTrain and use it to train models on Hugging Face Spaces infrastructure (local training coming soon). This API is designed to be used with AutoTrain-compatible models and datasets, and it provides a simple interface to train models with minimal configuration.

## Getting Started

To get started with the AutoTrain API, install `autotrain-advanced` as described in the running locally section and run the `autotrain app` command:

```bash
$ autotrain app --port 8000 --host 127.0.0.1
```

You can then access the API reference at `http://127.0.0.1:8000/docs`.

## Example Usage

```bash
curl -X POST "http://127.0.0.1:8000/api/create_project" \
     -H "Content-Type: application/json" \
     -H "Authorization: Bearer hf_XXXXX" \
     -d '{
     "username": "abhishek",
     "project_name": "my-autotrain-api-model",
     "task": "llm:orpo",
     "base_model": "meta-llama/Meta-Llama-3-8B-Instruct",
     "hub_dataset": "argilla/distilabel-capybara-dpo-7k-binarized",
     "train_split": "train",
     "hardware": "spaces-a10g-large",
     "column_mapping": {
          "text_column": "chosen",
          "rejected_text_column": "rejected",
          "prompt_text_column": "prompt"
     },
     "params": {
          "block_size": 1024,
          "model_max_length": 4096,
          "max_prompt_length": 512,
          "epochs": 1,
          "batch_size": 2,
          "lr": 0.00003,
          "peft": true,
          "quantization": "int4",
          "target_modules": "all-linear",
          "padding": "right",
          "optimizer": "adamw_torch",
          "scheduler": "linear",
          "gradient_accumulation": 4,
          "mixed_precision": "fp16",
          "chat_template": "chatml"
     }
}'
```
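For programmatic use, the same request can be issued from Python. The snippet below is a minimal equivalent of the curl example above using `requests`; the token and username are placeholders, and the payload mirrors the example rather than listing every supported parameter.

```python
# Python equivalent of the curl example above (placeholders: token, username).
import requests

API_URL = "http://127.0.0.1:8000/api/create_project"
HF_TOKEN = "hf_XXXXX"  # placeholder Hugging Face token

payload = {
    "username": "abhishek",
    "project_name": "my-autotrain-api-model",
    "task": "llm:orpo",
    "base_model": "meta-llama/Meta-Llama-3-8B-Instruct",
    "hub_dataset": "argilla/distilabel-capybara-dpo-7k-binarized",
    "train_split": "train",
    "hardware": "spaces-a10g-large",
    "column_mapping": {
        "text_column": "chosen",
        "rejected_text_column": "rejected",
        "prompt_text_column": "prompt",
    },
    "params": {
        "block_size": 1024,
        "model_max_length": 4096,
        "max_prompt_length": 512,
        "epochs": 1,
        "batch_size": 2,
        "lr": 3e-5,
        "peft": True,
        "quantization": "int4",
        "chat_template": "chatml",
    },
}

response = requests.post(
    API_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
    json=payload,  # requests sets the JSON Content-Type header automatically
    timeout=60,
)
response.raise_for_status()
print(response.json())
```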
autotrain-advanced/docs/source/autotrain_api.mdx/0
{ "file_path": "autotrain-advanced/docs/source/autotrain_api.mdx", "repo_id": "autotrain-advanced", "token_count": 940 }
17
# Quickstart Guide to AutoTrain on Hugging Face Spaces

AutoTrain on Hugging Face Spaces is the preferred choice for a streamlined experience in model training. This platform is optimized for ease of use, with pre-installed dependencies and managed hardware resources. AutoTrain on Hugging Face Spaces can be used by both no-code users and developers, making it versatile for various levels of expertise.

## Creating a New AutoTrain Space

Getting started with AutoTrain is straightforward. Here's how you can create your new space:

1. **Visit the AutoTrain Page**: To create a new space with the AutoTrain Docker image, all you need to do is go to the [AutoTrain Homepage](https://hf.co/autotrain) and click on "Create new project".

2. **Log In or View the Setup Screen**: If not logged in, you'll be prompted to do so. Then, you'll see a screen similar to this:

![autotrain-duplicate-space](https://raw.githubusercontent.com/huggingface/autotrain-advanced/main/static/duplicate_space.png)

3. **Set Up Your Space**:

- **Choose a Space Name**: Name your space something relevant to your project.
- **Allocate Hardware Resources**: Select the necessary computational resources based on your project needs.
- **Duplicate Space**: Click on "Duplicate Space" to initiate your AutoTrain space with the Docker image.

4. **Configuration Options**:

- `PAUSE_ON_FAILURE`: Set this to 0 if you prefer the space not to pause on training failures. This is useful for running continuous experiments or performing many experiments back to back in the same space.

5. **Launch and Train**:

- Once done, the AutoTrain Space will be up and running within a few seconds, and you will be presented with the following screen:

![autotrain-space](https://raw.githubusercontent.com/huggingface/autotrain-advanced/main/static/autotrain_space.png)

- From here, you can select tasks, upload datasets, choose models, adjust hyperparameters (if needed), and start the training process directly within the space.
- The space will manage its own activity, shutting down post-training unless configured otherwise based on the `PAUSE_ON_FAILURE` setting.

6. **Monitoring Progress**:

- All training logs and progress can be monitored via TensorBoard, accessible under `username/project_name` on the Hugging Face Hub.
- Once training concludes successfully, you'll find the model files in the same repository.

7. **Navigating the UI**:

- If you need help understanding any UI element, click on the small (i) information icon next to it for a detailed description.

For data formats and detailed parameter information, please see the Data Formats and Parameters section, where we provide example datasets and detailed information about the parameters for each task supported by AutoTrain.

## Ensuring Your AutoTrain is Up-to-Date

We are constantly adding new features and tasks to AutoTrain Advanced. To benefit from the latest features, tasks, and bug fixes, update your AutoTrain space regularly:

- *Factory Reboot*: Navigate to the settings page of your space and click on "Factory reboot" to upgrade to the latest version of AutoTrain Advanced.

![autotrain-space-template](https://raw.githubusercontent.com/huggingface/autotrain-advanced/main/static/space_template_5.png)

- *Note*: Simply "restarting" the space does not update it; a factory reboot is necessary for a complete update.
For additional details on data formats and specific parameters, refer to the 'Data Formats and Parameters' section, where we provide example datasets and extensive parameter information for each task supported by AutoTrain.

With these steps, you can effortlessly initiate and manage your AutoTrain projects on Hugging Face Spaces, leveraging the platform's robust capabilities for your machine learning and AI needs.
autotrain-advanced/docs/source/quickstart_spaces.mdx/0
{ "file_path": "autotrain-advanced/docs/source/quickstart_spaces.mdx", "repo_id": "autotrain-advanced", "token_count": 975 }
18
import json from dataclasses import dataclass from typing import Union from autotrain.trainers.clm.params import LLMTrainingParams from autotrain.trainers.dreambooth.params import DreamBoothTrainingParams from autotrain.trainers.generic.params import GenericParams from autotrain.trainers.image_classification.params import ImageClassificationParams from autotrain.trainers.object_detection.params import ObjectDetectionParams from autotrain.trainers.seq2seq.params import Seq2SeqParams from autotrain.trainers.tabular.params import TabularParams from autotrain.trainers.text_classification.params import TextClassificationParams from autotrain.trainers.text_regression.params import TextRegressionParams from autotrain.trainers.token_classification.params import TokenClassificationParams AVAILABLE_HARDWARE = { # hugging face spaces "spaces-a10g-large": "a10g-large", "spaces-a10g-small": "a10g-small", "spaces-a100-large": "a100-large", "spaces-t4-medium": "t4-medium", "spaces-t4-small": "t4-small", "spaces-cpu-upgrade": "cpu-upgrade", "spaces-cpu-basic": "cpu-basic", "spaces-l4x1": "l4x1", "spaces-l4x4": "l4x4", "spaces-a10g-largex2": "a10g-largex2", "spaces-a10g-largex4": "a10g-largex4", # ngc "dgx-a100": "dgxa100.80g.1.norm", "dgx-2a100": "dgxa100.80g.2.norm", "dgx-4a100": "dgxa100.80g.4.norm", "dgx-8a100": "dgxa100.80g.8.norm", # hugging face endpoints "ep-aws-useast1-s": "aws_us-east-1_gpu_small_g4dn.xlarge", "ep-aws-useast1-m": "aws_us-east-1_gpu_medium_g5.2xlarge", "ep-aws-useast1-l": "aws_us-east-1_gpu_large_g4dn.12xlarge", "ep-aws-useast1-xl": "aws_us-east-1_gpu_xlarge_p4de", "ep-aws-useast1-2xl": "aws_us-east-1_gpu_2xlarge_p4de", "ep-aws-useast1-4xl": "aws_us-east-1_gpu_4xlarge_p4de", "ep-aws-useast1-8xl": "aws_us-east-1_gpu_8xlarge_p4de", # nvcf "nvcf-l40sx1": {"id": "67bb8939-c932-429a-a446-8ae898311856"}, "nvcf-h100x1": {"id": "848348f8-a4e2-4242-bce9-6baa1bd70a66"}, "nvcf-h100x2": {"id": "fb006a89-451e-4d9c-82b5-33eff257e0bf"}, "nvcf-h100x4": {"id": "21bae5af-87e5-4132-8fc0-bf3084e59a57"}, "nvcf-h100x8": {"id": "6e0c2af6-5368-47e0-b15e-c070c2c92018"}, # local "local-ui": "local", "local": "local", "local-cli": "local", } @dataclass class BaseBackend: params: Union[ TextClassificationParams, ImageClassificationParams, LLMTrainingParams, GenericParams, TabularParams, DreamBoothTrainingParams, Seq2SeqParams, TokenClassificationParams, TextRegressionParams, ObjectDetectionParams, ] backend: str def __post_init__(self): self.username = None if isinstance(self.params, GenericParams) and self.backend.startswith("local"): raise ValueError("Local backend is not supported for GenericParams") if ( self.backend.startswith("spaces-") or self.backend.startswith("ep-") or self.backend.startswith("ngc-") or self.backend.startswith("nvcf-") ): if self.params.username is not None: self.username = self.params.username else: raise ValueError("Must provide username") if isinstance(self.params, LLMTrainingParams): self.task_id = 9 elif isinstance(self.params, TextClassificationParams): self.task_id = 2 elif isinstance(self.params, TabularParams): self.task_id = 26 elif isinstance(self.params, GenericParams): self.task_id = 27 elif isinstance(self.params, DreamBoothTrainingParams): self.task_id = 25 elif isinstance(self.params, Seq2SeqParams): self.task_id = 28 elif isinstance(self.params, ImageClassificationParams): self.task_id = 18 elif isinstance(self.params, TokenClassificationParams): self.task_id = 4 elif isinstance(self.params, TextRegressionParams): self.task_id = 10 elif isinstance(self.params, 
ObjectDetectionParams): self.task_id = 29 else: raise NotImplementedError self.available_hardware = AVAILABLE_HARDWARE self.wait = False if self.backend == "local-ui": self.wait = False if self.backend in ("local", "local-cli"): self.wait = True self.env_vars = { "HF_TOKEN": self.params.token, "AUTOTRAIN_USERNAME": self.username, "PROJECT_NAME": self.params.project_name, "TASK_ID": str(self.task_id), "PARAMS": json.dumps(self.params.model_dump_json()), } if isinstance(self.params, DreamBoothTrainingParams): self.env_vars["DATA_PATH"] = self.params.image_path else: self.env_vars["DATA_PATH"] = self.params.data_path if not isinstance(self.params, GenericParams): self.env_vars["MODEL"] = self.params.model
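Putting the pieces together, `BaseBackend.__post_init__` turns a concrete params object plus a backend key from `AVAILABLE_HARDWARE` into the environment a training job needs. The sketch below is hypothetical: it assumes the remaining `LLMTrainingParams` fields fall back to their defaults, uses placeholder credentials, and relies on `SpaceRunner` (a `BaseBackend` subclass) the same way the `spacerunner` CLI command does.

```python
# Hypothetical sketch: a params object + a spaces-* backend key -> a training job.
# Placeholder username/token; unlisted LLMTrainingParams fields assumed to use defaults.
from autotrain.backends.spaces import SpaceRunner
from autotrain.trainers.clm.params import LLMTrainingParams

params = LLMTrainingParams(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    data_path="argilla/distilabel-capybara-dpo-7k-binarized",
    project_name="my-llm-orpo",
    username="your-username",  # required for spaces-* backends (see __post_init__ above)
    token="hf_XXXXX",          # placeholder token
)

# SpaceRunner subclasses BaseBackend, so __post_init__ resolves the task id,
# validates the backend key and assembles the env vars shown above.
runner = SpaceRunner(params=params, backend="spaces-a10g-large")
job_id = runner.create()
print(f"Created training Space: {job_id}")
```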
autotrain-advanced/src/autotrain/backends/base.py/0
{ "file_path": "autotrain-advanced/src/autotrain/backends/base.py", "repo_id": "autotrain-advanced", "token_count": 2416 }
19
from argparse import ArgumentParser

from autotrain import logger
from autotrain.backends.base import AVAILABLE_HARDWARE
from autotrain.backends.spaces import SpaceRunner
from autotrain.trainers.generic.params import GenericParams
from autotrain.trainers.generic.utils import create_dataset_repo

from . import BaseAutoTrainCommand


BACKEND_CHOICES = list(AVAILABLE_HARDWARE.keys())
BACKEND_CHOICES = [b for b in BACKEND_CHOICES if b.startswith("spaces-")]


def run_spacerunner_command_factory(args):
    return RunAutoTrainSpaceRunnerCommand(args)


class RunAutoTrainSpaceRunnerCommand(BaseAutoTrainCommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        arg_list = [
            {
                "arg": "--project-name",
                "help": "Name of the project. Must be unique.",
                "required": True,
                "type": str,
            },
            {
                "arg": "--script-path",
                "help": "Path to the script",
                "required": True,
                "type": str,
            },
            {
                "arg": "--username",
                "help": "Hugging Face Username, can also be an organization name",
                "required": True,
                "type": str,
            },
            {
                "arg": "--token",
                "help": "Hugging Face API Token",
                "required": True,
                "type": str,
            },
            {
                "arg": "--backend",
                "help": "Hugging Face backend to use",
                "required": True,
                "type": str,
                "choices": BACKEND_CHOICES,
            },
            {
                "arg": "--env",
                "help": "Environment variables, e.g. --env FOO=bar;FOO2=bar2;FOO3=bar3",
                "required": False,
                "type": str,
            },
            {
                "arg": "--args",
                "help": "Arguments to pass to the script, e.g. --args foo=bar;foo2=bar2;foo3=bar3;store_true_arg",
                "required": False,
                "type": str,
            },
        ]
        run_spacerunner_parser = parser.add_parser("spacerunner", description="✨ Run AutoTrain SpaceRunner")
        for arg in arg_list:
            names = [arg["arg"]] + arg.get("alias", [])
            if "action" in arg:
                run_spacerunner_parser.add_argument(
                    *names,
                    dest=arg["arg"].replace("--", "").replace("-", "_"),
                    help=arg["help"],
                    required=arg.get("required", False),
                    action=arg.get("action"),
                    default=arg.get("default"),
                    choices=arg.get("choices"),
                )
            else:
                run_spacerunner_parser.add_argument(
                    *names,
                    dest=arg["arg"].replace("--", "").replace("-", "_"),
                    help=arg["help"],
                    required=arg.get("required", False),
                    type=arg.get("type"),
                    default=arg.get("default"),
                    choices=arg.get("choices"),
                )
        run_spacerunner_parser.set_defaults(func=run_spacerunner_command_factory)

    def __init__(self, args):
        self.args = args

        store_true_arg_names = []
        for arg_name in store_true_arg_names:
            if getattr(self.args, arg_name) is None:
                setattr(self.args, arg_name, False)

        env_vars = {}
        if self.args.env:
            for env_name_value in self.args.env.split(";"):
                if len(env_name_value.split("=")) == 2:
                    env_vars[env_name_value.split("=")[0]] = env_name_value.split("=")[1]
                else:
                    raise ValueError("Invalid environment variable format.")
        self.args.env = env_vars

        app_args = {}
        store_true_args = []
        if self.args.args:
            for arg_name_value in self.args.args.split(";"):
                if len(arg_name_value.split("=")) == 1:
                    store_true_args.append(arg_name_value)
                elif len(arg_name_value.split("=")) == 2:
                    app_args[arg_name_value.split("=")[0]] = arg_name_value.split("=")[1]
                else:
                    raise ValueError("Invalid argument format.")

        for arg_name in store_true_args:
            app_args[arg_name] = ""
        self.args.args = app_args

    def run(self):
        dataset_id = create_dataset_repo(
            username=self.args.username,
            project_name=self.args.project_name,
            script_path=self.args.script_path,
            token=self.args.token,
        )
        params = GenericParams(
            project_name=self.args.project_name,
            data_path=dataset_id,
            username=self.args.username,
            token=self.args.token,
            script_path=self.args.script_path,
            env=self.args.env,
            args=self.args.args,
        )
        project = SpaceRunner(params=params, backend=self.args.backend)
        job_id = project.create()
        logger.info(f"Job ID: {job_id}")
autotrain-advanced/src/autotrain/cli/run_spacerunner.py/0
{ "file_path": "autotrain-advanced/src/autotrain/cli/run_spacerunner.py", "repo_id": "autotrain-advanced", "token_count": 2808 }
20
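To make the `--env`/`--args` handling in the `spacerunner` command above easier to follow, here is a minimal, self-contained sketch of the same semicolon-separated parsing. It does not import AutoTrain; the helper names `parse_env` and `parse_args` are introduced here purely for illustration.

```python
def parse_env(env_str: str) -> dict:
    """Parse 'FOO=bar;FOO2=bar2' into {'FOO': 'bar', 'FOO2': 'bar2'} (mirrors the CLI's --env handling)."""
    env_vars = {}
    for pair in env_str.split(";"):
        key, sep, value = pair.partition("=")
        if not sep:
            raise ValueError("Invalid environment variable format.")
        env_vars[key] = value
    return env_vars


def parse_args(args_str: str) -> dict:
    """Parse 'foo=bar;store_true_arg' into {'foo': 'bar', 'store_true_arg': ''} (mirrors --args handling)."""
    app_args = {}
    for item in args_str.split(";"):
        key, sep, value = item.partition("=")
        # flag-style entries without '=' become empty strings, like the CLI's store_true arguments
        app_args[key] = value if sep else ""
    return app_args


if __name__ == "__main__":
    print(parse_env("HF_TOKEN=abc;WANDB_DISABLED=true"))
    print(parse_args("epochs=3;fp16"))
```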
import ast
from dataclasses import dataclass
from typing import Optional

import pandas as pd
from datasets import ClassLabel, Dataset, DatasetDict, Sequence
from sklearn.model_selection import train_test_split

from autotrain import logger


RESERVED_COLUMNS = ["autotrain_text", "autotrain_label"]
LLM_RESERVED_COLUMNS = [
    "autotrain_prompt",
    "autotrain_context",
    "autotrain_rejected_text",
    "autotrain_prompt_start",
]


@dataclass
class TextBinaryClassificationPreprocessor:
    train_data: pd.DataFrame
    text_column: str
    label_column: str
    username: str
    project_name: str
    token: str
    valid_data: Optional[pd.DataFrame] = None
    test_size: Optional[float] = 0.2
    seed: Optional[int] = 42
    convert_to_class_label: Optional[bool] = False
    local: Optional[bool] = False

    def __post_init__(self):
        # check if text_column and label_column are in train_data
        if self.text_column not in self.train_data.columns:
            raise ValueError(f"{self.text_column} not in train data")
        if self.label_column not in self.train_data.columns:
            raise ValueError(f"{self.label_column} not in train data")
        # check if text_column and label_column are in valid_data
        if self.valid_data is not None:
            if self.text_column not in self.valid_data.columns:
                raise ValueError(f"{self.text_column} not in valid data")
            if self.label_column not in self.valid_data.columns:
                raise ValueError(f"{self.label_column} not in valid data")

        # make sure no reserved columns are in train_data or valid_data
        for column in RESERVED_COLUMNS:
            if column in self.train_data.columns:
                raise ValueError(f"{column} is a reserved column name")
            if self.valid_data is not None:
                if column in self.valid_data.columns:
                    raise ValueError(f"{column} is a reserved column name")

    def split(self):
        if self.valid_data is not None:
            return self.train_data, self.valid_data
        else:
            train_df, valid_df = train_test_split(
                self.train_data,
                test_size=self.test_size,
                random_state=self.seed,
                stratify=self.train_data[self.label_column],
            )
            train_df = train_df.reset_index(drop=True)
            valid_df = valid_df.reset_index(drop=True)
            return train_df, valid_df

    def prepare_columns(self, train_df, valid_df):
        train_df.loc[:, "autotrain_text"] = train_df[self.text_column]
        train_df.loc[:, "autotrain_label"] = train_df[self.label_column]
        valid_df.loc[:, "autotrain_text"] = valid_df[self.text_column]
        valid_df.loc[:, "autotrain_label"] = valid_df[self.label_column]

        # drop text_column and label_column
        train_df = train_df.drop(columns=[self.text_column, self.label_column])
        valid_df = valid_df.drop(columns=[self.text_column, self.label_column])
        return train_df, valid_df

    def prepare(self):
        train_df, valid_df = self.split()
        train_df, valid_df = self.prepare_columns(train_df, valid_df)

        train_df.loc[:, "autotrain_label"] = train_df["autotrain_label"].astype(str)
        valid_df.loc[:, "autotrain_label"] = valid_df["autotrain_label"].astype(str)

        label_names = sorted(set(train_df["autotrain_label"].unique().tolist()))

        train_df = Dataset.from_pandas(train_df)
        valid_df = Dataset.from_pandas(valid_df)

        if self.convert_to_class_label:
            train_df = train_df.cast_column("autotrain_label", ClassLabel(names=label_names))
            valid_df = valid_df.cast_column("autotrain_label", ClassLabel(names=label_names))

        if self.local:
            dataset = DatasetDict(
                {
                    "train": train_df,
                    "validation": valid_df,
                }
            )
            dataset.save_to_disk(f"{self.project_name}/autotrain-data")
        else:
            train_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="train",
                private=True,
                token=self.token,
            )
            valid_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="validation",
                private=True,
                token=self.token,
            )

        if self.local:
            return f"{self.project_name}/autotrain-data"
        return f"{self.username}/autotrain-data-{self.project_name}"


class TextMultiClassClassificationPreprocessor(TextBinaryClassificationPreprocessor):
    pass


class TextSingleColumnRegressionPreprocessor(TextBinaryClassificationPreprocessor):
    def split(self):
        if self.valid_data is not None:
            return self.train_data, self.valid_data
        else:
            train_df, valid_df = train_test_split(
                self.train_data,
                test_size=self.test_size,
                random_state=self.seed,
            )
            train_df = train_df.reset_index(drop=True)
            valid_df = valid_df.reset_index(drop=True)
            return train_df, valid_df

    def prepare(self):
        train_df, valid_df = self.split()
        train_df, valid_df = self.prepare_columns(train_df, valid_df)

        train_df = Dataset.from_pandas(train_df)
        valid_df = Dataset.from_pandas(valid_df)

        if self.local:
            dataset = DatasetDict(
                {
                    "train": train_df,
                    "validation": valid_df,
                }
            )
            dataset.save_to_disk(f"{self.project_name}/autotrain-data")
        else:
            train_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="train",
                private=True,
                token=self.token,
            )
            valid_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="validation",
                private=True,
                token=self.token,
            )

        if self.local:
            return f"{self.project_name}/autotrain-data"
        return f"{self.username}/autotrain-data-{self.project_name}"


class TextTokenClassificationPreprocessor(TextBinaryClassificationPreprocessor):
    def split(self):
        if self.valid_data is not None:
            return self.train_data, self.valid_data
        else:
            train_df, valid_df = train_test_split(
                self.train_data,
                test_size=self.test_size,
                random_state=self.seed,
            )
            train_df = train_df.reset_index(drop=True)
            valid_df = valid_df.reset_index(drop=True)
            return train_df, valid_df

    def prepare(self):
        train_df, valid_df = self.split()
        train_df, valid_df = self.prepare_columns(train_df, valid_df)

        try:
            train_df.loc[:, "autotrain_text"] = train_df["autotrain_text"].apply(lambda x: ast.literal_eval(x))
            valid_df.loc[:, "autotrain_text"] = valid_df["autotrain_text"].apply(lambda x: ast.literal_eval(x))
        except ValueError:
            logger.warning("Unable to do ast.literal_eval on train_df['autotrain_text']")
            logger.warning("assuming autotrain_text is already a list")

        try:
            train_df.loc[:, "autotrain_label"] = train_df["autotrain_label"].apply(lambda x: ast.literal_eval(x))
            valid_df.loc[:, "autotrain_label"] = valid_df["autotrain_label"].apply(lambda x: ast.literal_eval(x))
        except ValueError:
            logger.warning("Unable to do ast.literal_eval on train_df['autotrain_label']")
            logger.warning("assuming autotrain_label is already a list")

        label_names_train = sorted(set(train_df["autotrain_label"].explode().unique().tolist()))
        label_names_valid = sorted(set(valid_df["autotrain_label"].explode().unique().tolist()))
        label_names = sorted(set(label_names_train + label_names_valid))

        train_df = Dataset.from_pandas(train_df)
        valid_df = Dataset.from_pandas(valid_df)

        if self.convert_to_class_label:
            train_df = train_df.cast_column("autotrain_label", Sequence(ClassLabel(names=label_names)))
            valid_df = valid_df.cast_column("autotrain_label", Sequence(ClassLabel(names=label_names)))

        if self.local:
            dataset = DatasetDict(
                {
                    "train": train_df,
                    "validation": valid_df,
                }
            )
            dataset.save_to_disk(f"{self.project_name}/autotrain-data")
        else:
            train_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="train",
                private=True,
                token=self.token,
            )
            valid_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="validation",
                private=True,
                token=self.token,
            )

        if self.local:
            return f"{self.project_name}/autotrain-data"
        return f"{self.username}/autotrain-data-{self.project_name}"


@dataclass
class LLMPreprocessor:
    train_data: pd.DataFrame
    username: str
    project_name: str
    token: str
    valid_data: Optional[pd.DataFrame] = None
    test_size: Optional[float] = 0.2
    seed: Optional[int] = 42
    text_column: Optional[str] = None
    prompt_column: Optional[str] = None
    rejected_text_column: Optional[str] = None
    local: Optional[bool] = False

    def __post_init__(self):
        if self.text_column is None:
            raise ValueError("text_column must be provided")

        # check if text_column and rejected_text_column are in train_data
        if self.prompt_column is not None and self.prompt_column not in self.train_data.columns:
            self.prompt_column = None
        if self.rejected_text_column is not None and self.rejected_text_column not in self.train_data.columns:
            self.rejected_text_column = None

        # make sure no reserved columns are in train_data or valid_data
        for column in RESERVED_COLUMNS + LLM_RESERVED_COLUMNS:
            if column in self.train_data.columns:
                raise ValueError(f"{column} is a reserved column name")
            if self.valid_data is not None:
                if column in self.valid_data.columns:
                    raise ValueError(f"{column} is a reserved column name")

    def split(self):
        if self.valid_data is not None:
            return self.train_data, self.valid_data
        # no validation is done in llm training if validation data is not provided
        return self.train_data, self.train_data
        # else:
        #     train_df, valid_df = train_test_split(
        #         self.train_data,
        #         test_size=self.test_size,
        #         random_state=self.seed,
        #     )
        #     train_df = train_df.reset_index(drop=True)
        #     valid_df = valid_df.reset_index(drop=True)
        #     return train_df, valid_df

    def prepare_columns(self, train_df, valid_df):
        drop_cols = [self.text_column]
        train_df.loc[:, "autotrain_text"] = train_df[self.text_column]
        valid_df.loc[:, "autotrain_text"] = valid_df[self.text_column]
        if self.prompt_column is not None:
            drop_cols.append(self.prompt_column)
            train_df.loc[:, "autotrain_prompt"] = train_df[self.prompt_column]
            valid_df.loc[:, "autotrain_prompt"] = valid_df[self.prompt_column]
        if self.rejected_text_column is not None:
            drop_cols.append(self.rejected_text_column)
            train_df.loc[:, "autotrain_rejected_text"] = train_df[self.rejected_text_column]
            valid_df.loc[:, "autotrain_rejected_text"] = valid_df[self.rejected_text_column]

        # drop drop_cols
        train_df = train_df.drop(columns=drop_cols)
        valid_df = valid_df.drop(columns=drop_cols)
        return train_df, valid_df

    def prepare(self):
        train_df, valid_df = self.split()
        train_df, valid_df = self.prepare_columns(train_df, valid_df)

        train_df = Dataset.from_pandas(train_df)
        valid_df = Dataset.from_pandas(valid_df)

        if self.local:
            dataset = DatasetDict(
                {
                    "train": train_df,
                    "validation": valid_df,
                }
            )
            dataset.save_to_disk(f"{self.project_name}/autotrain-data")
        else:
            train_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="train",
                private=True,
                token=self.token,
            )
            valid_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="validation",
                private=True,
                token=self.token,
            )

        if self.local:
            return f"{self.project_name}/autotrain-data"
        return f"{self.username}/autotrain-data-{self.project_name}"


@dataclass
class Seq2SeqPreprocessor:
    train_data: pd.DataFrame
    text_column: str
    label_column: str
    username: str
    project_name: str
    token: str
    valid_data: Optional[pd.DataFrame] = None
    test_size: Optional[float] = 0.2
    seed: Optional[int] = 42
    local: Optional[bool] = False

    def __post_init__(self):
        # check if text_column and label_column are in train_data
        if self.text_column not in self.train_data.columns:
            raise ValueError(f"{self.text_column} not in train data")
        if self.label_column not in self.train_data.columns:
            raise ValueError(f"{self.label_column} not in train data")
        # check if text_column and label_column are in valid_data
        if self.valid_data is not None:
            if self.text_column not in self.valid_data.columns:
                raise ValueError(f"{self.text_column} not in valid data")
            if self.label_column not in self.valid_data.columns:
                raise ValueError(f"{self.label_column} not in valid data")

        # make sure no reserved columns are in train_data or valid_data
        for column in RESERVED_COLUMNS:
            if column in self.train_data.columns:
                raise ValueError(f"{column} is a reserved column name")
            if self.valid_data is not None:
                if column in self.valid_data.columns:
                    raise ValueError(f"{column} is a reserved column name")

    def split(self):
        if self.valid_data is not None:
            return self.train_data, self.valid_data
        else:
            train_df, valid_df = train_test_split(
                self.train_data,
                test_size=self.test_size,
                random_state=self.seed,
            )
            train_df = train_df.reset_index(drop=True)
            valid_df = valid_df.reset_index(drop=True)
            return train_df, valid_df

    def prepare_columns(self, train_df, valid_df):
        train_df.loc[:, "autotrain_text"] = train_df[self.text_column]
        train_df.loc[:, "autotrain_label"] = train_df[self.label_column]
        valid_df.loc[:, "autotrain_text"] = valid_df[self.text_column]
        valid_df.loc[:, "autotrain_label"] = valid_df[self.label_column]

        # drop text_column and label_column
        train_df = train_df.drop(columns=[self.text_column, self.label_column])
        valid_df = valid_df.drop(columns=[self.text_column, self.label_column])
        return train_df, valid_df

    def prepare(self):
        train_df, valid_df = self.split()
        train_df, valid_df = self.prepare_columns(train_df, valid_df)

        train_df = Dataset.from_pandas(train_df)
        valid_df = Dataset.from_pandas(valid_df)

        if self.local:
            dataset = DatasetDict(
                {
                    "train": train_df,
                    "validation": valid_df,
                }
            )
            dataset.save_to_disk(f"{self.project_name}/autotrain-data")
        else:
            train_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="train",
                private=True,
                token=self.token,
            )
            valid_df.push_to_hub(
                f"{self.username}/autotrain-data-{self.project_name}",
                split="validation",
                private=True,
                token=self.token,
            )

        if self.local:
            return f"{self.project_name}/autotrain-data"
        return f"{self.username}/autotrain-data-{self.project_name}"
autotrain-advanced/src/autotrain/preprocessor/text.py/0
{ "file_path": "autotrain-advanced/src/autotrain/preprocessor/text.py", "repo_id": "autotrain-advanced", "token_count": 8279 }
21
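As a usage illustration for the preprocessors defined in the file above, the sketch below builds a toy DataFrame and runs the binary text classification preprocessor locally. It is a best-effort example based on the dataclass fields shown there; the toy data, username, and project name are made up, and a real Hub token is only needed when `local=False`.

```python
import pandas as pd

from autotrain.preprocessor.text import TextBinaryClassificationPreprocessor

# Toy training data; repeated so the stratified train/validation split has enough rows per class.
train_df = pd.DataFrame(
    {
        "review": ["great movie", "terrible plot", "loved it", "boring", "what a film", "meh"] * 5,
        "sentiment": ["pos", "neg", "pos", "neg", "pos", "neg"] * 5,
    }
)

preprocessor = TextBinaryClassificationPreprocessor(
    train_data=train_df,
    text_column="review",
    label_column="sentiment",
    username="my-user",           # made-up value
    project_name="demo-project",  # made-up value
    token="hf_xxx",               # unused when local=True
    convert_to_class_label=True,
    local=True,                   # save to disk instead of pushing to the Hub
)

# Splits into train/validation, renames columns to autotrain_text/autotrain_label,
# and writes a DatasetDict to "demo-project/autotrain-data".
data_path = preprocessor.prepare()
print(data_path)
```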
import torch
from peft import LoraConfig
from transformers import AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig
from transformers.trainer_callback import PrinterCallback
from trl import ORPOConfig, ORPOTrainer

from autotrain import logger
from autotrain.trainers.clm import utils
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.common import ALLOW_REMOTE_CODE


def train(config):
    logger.info("Starting ORPO training...")
    if isinstance(config, dict):
        config = LLMTrainingParams(**config)

    train_data, valid_data = utils.process_input_data(config)
    tokenizer = utils.get_tokenizer(config)
    train_data, valid_data = utils.process_data_with_chat_template(config, tokenizer, train_data, valid_data)

    logging_steps = utils.configure_logging_steps(config, train_data, valid_data)
    training_args = utils.configure_training_args(config, logging_steps)
    config = utils.configure_block_size(config, tokenizer)
    training_args["max_length"] = config.block_size
    training_args["max_prompt_length"] = config.max_prompt_length
    training_args["max_completion_length"] = config.max_completion_length
    args = ORPOConfig(**training_args)

    logger.info("loading model config...")
    model_config = AutoConfig.from_pretrained(
        config.model,
        token=config.token,
        trust_remote_code=ALLOW_REMOTE_CODE,
        use_cache=config.disable_gradient_checkpointing,
    )

    logger.info("loading model...")
    if config.peft:
        if config.quantization == "int4":
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16,
                bnb_4bit_use_double_quant=False,
            )
        elif config.quantization == "int8":
            bnb_config = BitsAndBytesConfig(load_in_8bit=True)
        else:
            bnb_config = None

        model = AutoModelForCausalLM.from_pretrained(
            config.model,
            config=model_config,
            token=config.token,
            quantization_config=bnb_config,
            trust_remote_code=ALLOW_REMOTE_CODE,
            use_flash_attention_2=config.use_flash_attention_2,
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(
            config.model,
            config=model_config,
            token=config.token,
            trust_remote_code=ALLOW_REMOTE_CODE,
            use_flash_attention_2=config.use_flash_attention_2,
        )

    logger.info(f"model dtype: {model.dtype}")
    model.resize_token_embeddings(len(tokenizer))

    if config.peft:
        peft_config = LoraConfig(
            r=config.lora_r,
            lora_alpha=config.lora_alpha,
            lora_dropout=config.lora_dropout,
            bias="none",
            task_type="CAUSAL_LM",
            target_modules=utils.get_target_modules(config),
        )

    logger.info("creating trainer")
    callbacks = utils.get_callbacks(config)
    trainer_args = dict(
        args=args,
        model=model,
        callbacks=callbacks,
    )
    trainer = ORPOTrainer(
        **trainer_args,
        train_dataset=train_data,
        eval_dataset=valid_data if config.valid_split is not None else None,
        tokenizer=tokenizer,
        peft_config=peft_config if config.peft else None,
    )
    trainer.remove_callback(PrinterCallback)
    trainer.train()
    utils.post_training_steps(config, trainer)
autotrain-advanced/src/autotrain/trainers/clm/train_clm_orpo.py/0
{ "file_path": "autotrain-advanced/src/autotrain/trainers/clm/train_clm_orpo.py", "repo_id": "autotrain-advanced", "token_count": 1540 }
22
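The quantized-PEFT path in the ORPO training script above follows a common pattern: build a `BitsAndBytesConfig`, load the base model with it, then attach a LoRA adapter. Below is a minimal standalone sketch of that pattern, not the AutoTrain entry point itself; the model id and LoRA hyperparameters are placeholder choices, and 4-bit loading assumes `bitsandbytes` and a CUDA GPU are available.

```python
import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "facebook/opt-350m"  # placeholder model, small enough to test locally

# Same 4-bit quantization settings as the "int4" branch in the trainer above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=False,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map="auto")

# Attach a LoRA adapter so only a small number of parameters are trainable.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```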
import os
import subprocess

import requests
from huggingface_hub import HfApi, snapshot_download

from autotrain import logger


def create_dataset_repo(username, project_name, script_path, token):
    logger.info("Creating dataset repo...")
    api = HfApi(token=token)
    repo_id = f"{username}/autotrain-{project_name}"
    api.create_repo(
        repo_id=repo_id,
        repo_type="dataset",
        private=True,
    )
    logger.info("Uploading dataset...")
    api.upload_folder(
        folder_path=script_path,
        repo_id=repo_id,
        repo_type="dataset",
    )
    logger.info("Dataset uploaded.")
    return repo_id


def pull_dataset_repo(params):
    snapshot_download(
        repo_id=params.data_path,
        local_dir=params.project_name,
        token=params.token,
        repo_type="dataset",
    )


def uninstall_requirements(params):
    if os.path.exists(f"{params.project_name}/requirements.txt"):
        # read the requirements.txt
        uninstall_list = []
        with open(f"{params.project_name}/requirements.txt", "r", encoding="utf-8") as f:
            for line in f:
                if line.startswith("-"):
                    uninstall_list.append(line[1:])

        # create an uninstall.txt
        with open(f"{params.project_name}/uninstall.txt", "w", encoding="utf-8") as f:
            for line in uninstall_list:
                f.write(line)

        pipe = subprocess.Popen(
            [
                "pip",
                "uninstall",
                "-r",
                "uninstall.txt",
                "-y",
            ],
            cwd=params.project_name,
        )
        pipe.wait()
        logger.info("Requirements uninstalled.")
        return


def install_requirements(params):
    # check if params.project_name has a requirements.txt
    if os.path.exists(f"{params.project_name}/requirements.txt"):
        # install the requirements using subprocess, wait for it to finish
        install_list = []
        with open(f"{params.project_name}/requirements.txt", "r", encoding="utf-8") as f:
            for line in f:
                if not line.startswith("-"):
                    install_list.append(line)

        with open(f"{params.project_name}/requirements.txt", "w", encoding="utf-8") as f:
            for line in install_list:
                f.write(line)

        pipe = subprocess.Popen(
            [
                "pip",
                "install",
                "-r",
                "requirements.txt",
            ],
            cwd=params.project_name,
        )
        pipe.wait()
        logger.info("Requirements installed.")
        return
    logger.info("No requirements.txt found. Skipping requirements installation.")
    return


def run_command(params):
    if os.path.exists(f"{params.project_name}/script.py"):
        cmd = ["python", "script.py"]
        if params.args:
            for arg in params.args:
                cmd.append(f"--{arg}")
                if params.args[arg] != "":
                    cmd.append(params.args[arg])
        pipe = subprocess.Popen(cmd, cwd=params.project_name)
        pipe.wait()
        logger.info("Command finished.")
        return
    raise ValueError("No script.py found.")


def pause_endpoint(params):
    endpoint_id = os.environ["ENDPOINT_ID"]
    username = endpoint_id.split("/")[0]
    project_name = endpoint_id.split("/")[1]
    api_url = f"https://api.endpoints.huggingface.cloud/v2/endpoint/{username}/{project_name}/pause"
    headers = {"Authorization": f"Bearer {params.token}"}
    r = requests.post(api_url, headers=headers, timeout=120)
    return r.json()
autotrain-advanced/src/autotrain/trainers/generic/utils.py/0
{ "file_path": "autotrain-advanced/src/autotrain/trainers/generic/utils.py", "repo_id": "autotrain-advanced", "token_count": 1733 }
23
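For clarity, here is a small self-contained sketch of how the `run_command` helper above turns the parsed `args` mapping into a command line before launching `script.py`. The `build_command` name and the example mapping are invented for illustration only.

```python
def build_command(args: dict) -> list:
    """Mirror run_command: every key becomes --key, and non-empty values are appended after it."""
    cmd = ["python", "script.py"]
    for name, value in args.items():
        cmd.append(f"--{name}")
        if value != "":
            cmd.append(value)
    return cmd


print(build_command({"epochs": "3", "fp16": "", "model": "bert-base-uncased"}))
# ['python', 'script.py', '--epochs', '3', '--fp16', '--model', 'bert-base-uncased']
```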
class TokenClassificationDataset:
    def __init__(self, data, tokenizer, config):
        self.data = data
        self.tokenizer = tokenizer
        self.config = config

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        text = self.data[item][self.config.tokens_column]
        tags = self.data[item][self.config.tags_column]

        label_list = self.data.features[self.config.tags_column].feature.names
        label_to_id = {i: i for i in range(len(label_list))}

        tokenized_text = self.tokenizer(
            text,
            max_length=self.config.max_seq_length,
            padding="max_length",
            truncation=True,
            is_split_into_words=True,
        )

        word_ids = tokenized_text.word_ids(batch_index=0)
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            if word_idx is None:
                label_ids.append(-100)
            elif word_idx != previous_word_idx:
                label_ids.append(label_to_id[tags[word_idx]])
            else:
                label_ids.append(label_to_id[tags[word_idx]])
            previous_word_idx = word_idx

        tokenized_text["labels"] = label_ids
        return tokenized_text
autotrain-advanced/src/autotrain/trainers/token_classification/dataset.py/0
{ "file_path": "autotrain-advanced/src/autotrain/trainers/token_classification/dataset.py", "repo_id": "autotrain-advanced", "token_count": 627 }
24
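A rough usage sketch for the dataset wrapper above is shown below. The only attributes the class reads off its `config` are `tokens_column`, `tags_column`, and `max_seq_length`, so a simple namespace stands in for the full trainer params object here; CoNLL-2003 and a BERT tokenizer are just convenient stand-ins.

```python
from types import SimpleNamespace

from datasets import load_dataset
from transformers import AutoTokenizer

from autotrain.trainers.token_classification.dataset import TokenClassificationDataset

raw = load_dataset("conll2003", split="train")
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

# Stand-in config exposing only the attributes the wrapper actually uses.
config = SimpleNamespace(tokens_column="tokens", tags_column="ner_tags", max_seq_length=128)

dataset = TokenClassificationDataset(data=raw, tokenizer=tokenizer, config=config)
sample = dataset[0]
print(sample["input_ids"][:10], sample["labels"][:10])
```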
--- title: "Accelerating Hugging Face Transformers with AWS Inferentia2" thumbnail: /blog/assets/140_accelerate_transformers_with_inferentia2/thumbnail.png authors: - user: philschmid - user: juliensimon --- # Accelerating Hugging Face Transformers with AWS Inferentia2 <script async defer src="https://unpkg.com/medium-zoom-element@0/dist/medium-zoom-element.min.js"></script> In the last five years, Transformer models [[1](https://arxiv.org/abs/1706.03762)] have become the _de facto_ standard for many machine learning (ML) tasks, such as natural language processing (NLP), computer vision (CV), speech, and more. Today, many data scientists and ML engineers rely on popular transformer architectures like BERT [[2](https://arxiv.org/abs/1810.04805)], RoBERTa [[3](https://arxiv.org/abs/1907.11692)], the Vision Transformer [[4](https://arxiv.org/abs/2010.11929)], or any of the 130,000+ pre-trained models available on the [Hugging Face](https://huggingface.co) hub to solve complex business problems with state-of-the-art accuracy. However, for all their greatness, Transformers can be challenging to deploy in production. On top of the infrastructure plumbing typically associated with model deployment, which we largely solved with our [Inference Endpoints](https://huggingface.co/inference-endpoints) service, Transformers are large models which routinely exceed the multi-gigabyte mark. Large language models (LLMs) like [GPT-J-6B](https://huggingface.co/EleutherAI/gpt-j-6B), [Flan-T5](https://huggingface.co/google/flan-t5-xxl), or [Opt-30B](https://huggingface.co/facebook/opt-30b) are in the tens of gigabytes, not to mention behemoths like [BLOOM](https://huggingface.co/bigscience/bloom), our very own LLM, which clocks in at 350 gigabytes. Fitting these models on a single accelerator can be quite difficult, let alone getting the high throughput and low inference latency that applications require, like conversational applications and search. So far, ML experts have designed complex manual techniques to slice large models, distribute them on a cluster of accelerators, and optimize their latency. Unfortunately, this work is extremely difficult, time-consuming, and completely out of reach for many ML practitioners. At Hugging Face, we're democratizing ML and always looking to partner with companies who also believe that every developer and organization should benefit from state-of-the-art models. For this purpose, we're excited to partner with Amazon Web Services to optimize Hugging Face Transformers for AWS [Inferentia 2](https://aws.amazon.com/machine-learning/inferentia/)! It’s a new purpose-built inference accelerator that delivers unprecedented levels of throughput, latency, performance per watt, and scalability. ## Introducing AWS Inferentia2 AWS Inferentia2 is the next generation to Inferentia1 launched in 2019. Powered by Inferentia1, Amazon EC2 Inf1 instances delivered 25% higher throughput and 70% lower cost than comparable G5 instances based on NVIDIA A10G GPU, and with Inferentia2, AWS is pushing the envelope again. The new Inferentia2 chip delivers a 4x throughput increase and a 10x latency reduction compared to Inferentia. Likewise, the new [Amazon EC2 Inf2](https://aws.amazon.com/de/ec2/instance-types/inf2/) instances have up to 2.6x better throughput, 8.1x lower latency, and 50% better performance per watt than comparable G5 instances. 
Inferentia 2 gives you the best of both worlds: cost-per-inference optimization thanks to high throughput and response time for your application thanks to low inference latency. Inf2 instances are available in multiple sizes, which are equipped with between 1 to 12 Inferentia 2 chips. When several chips are present, they are interconnected by a blazing-fast direct Inferentia2 to Inferentia2 connectivity for distributed inference on large models. For example, the largest instance size, inf2.48xlarge, has 12 chips and enough memory to load a 175-billion parameter model like GPT-3 or BLOOM. Thankfully none of this comes at the expense of development complexity. With [optimum neuron](https://github.com/huggingface/optimum-neuron), you don't need to slice or modify your model. Because of the native integration in [AWS Neuron SDK](https://github.com/aws-neuron/aws-neuron-sdk), all it takes is a single line of code to compile your model for Inferentia 2. You can experiment in minutes! Test the performance your model could reach on Inferentia 2 and see for yourself. Speaking of, let’s show you how several Hugging Face models run on Inferentia 2. Benchmarking time! ## Benchmarking Hugging Face Models on AWS Inferentia 2 We evaluated some of the most popular NLP models from the [Hugging Face Hub](https://huggingface.co/models) including BERT, RoBERTa, DistilBERT, and vision models like Vision Transformers. The first benchmark compares the performance of Inferentia, Inferentia 2, and GPUs. We ran all experiments on AWS with the following instance types: * Inferentia1 - [inf1.2xlarge](https://aws.amazon.com/ec2/instance-types/inf1/?nc1=h_ls) powered by a single Inferentia chip. * Inferentia2 - [inf2.xlarge](https://aws.amazon.com/ec2/instance-types/inf2/?nc1=h_ls) powered by a single Inferentia2 chip. * GPU - [g5.2xlarge](https://aws.amazon.com/ec2/instance-types/g5/) powered by a single NVIDIA A10G GPU. _Note: that we did not optimize the model for the GPU environment, the models were evaluated in fp32._ When it comes to benchmarking Transformer models, there are two metrics that are most adopted: * **Latency**: the time it takes for the model to perform a single prediction (pre-process, prediction, post-process). * **Throughput**: the number of executions performed in a fixed amount of time for one benchmark configuration We looked at latency across different setups and models to understand the benefits and tradeoffs of the new Inferentia2 instance. If you want to run the benchmark yourself, we created a [Github repository](https://github.com/philschmid/aws-neuron-samples/tree/main/benchmark) with all the information and scripts to do so. ### Results The benchmark confirms that the performance improvements claimed by AWS can be reproduced and validated by real use-cases and examples. On average, AWS Inferentia2 delivers 4.5x better latency than NVIDIA A10G GPUs and 4x better latency than Inferentia1 instances. We ran 144 experiments on 6 different model architectures: * Accelerators: Inf1, Inf2, NVIDIA A10G * Models: [BERT-base](https://huggingface.co/bert-base-uncased), [BERT-Large](https://huggingface.co/bert-large-uncased), [RoBERTa-base](https://huggingface.co/roberta-base), [DistilBERT](https://huggingface.co/distilbert-base-uncased), [ALBERT-base](https://huggingface.co/albert-base-v2), [ViT-base](https://huggingface.co/google/vit-base-patch16-224) * Sequence length: 8, 16, 32, 64, 128, 256, 512 * Batch size: 1 In each experiment, we collected numbers for p95 latency. 
You can find the full details of the benchmark in this spreadsheet: [HuggingFace: Benchmark Inferentia2](https://docs.google.com/spreadsheets/d/1AULEHBu5Gw6ABN8Ls6aSB2CeZyTIP_y5K7gC7M3MXqs/edit?usp=sharing). Let’s highlight a few insights of the benchmark. ### BERT-base Here is the latency comparison for running [BERT-base](https://huggingface.co/bert-base-uncased) on each of the infrastructure setups, with a logarithmic scale for latency. It is remarkable to see how Inferentia2 outperforms all other setups by ~6x for sequence lengths up to 256. <br> <figure class="image table text-center m-0 w-full"> <medium-zoom background="rgba(0,0,0,.7)" alt="BERT-base p95 latency" src="assets/140_accelerate_transformers_with_inferentia2/bert.png"></medium-zoom> <figcaption>Figure 1. BERT-base p95 latency</figcaption> </figure> <br> ### Vision Transformer Here is the latency comparison for running [ViT-base](https://huggingface.co/google/vit-base-patch16-224) on the different infrastructure setups. Inferentia2 delivers 2x better latency than the NVIDIA A10G, with the potential to greatly help companies move from traditional architectures, like CNNs, to Transformers for - real-time applications. <br> <figure class="image table text-center m-0 w-full"> <medium-zoom background="rgba(0,0,0,.7)" alt="ViT p95 latency" src="assets/140_accelerate_transformers_with_inferentia2/vit.png"></medium-zoom> <figcaption>Figure 1. ViT p95 latency</figcaption> </figure> <br> ## Conclusion Transformer models have emerged as the go-to solution for many machine learning tasks. However, deploying them in production has been challenging due to their large size and latency requirements. Thanks to AWS Inferentia2 and the collaboration between Hugging Face and AWS, developers and organizations can now leverage the benefits of state-of-the-art models without the prior need for extensive machine learning expertise. You can start testing for as low as 0.76$/h. The initial benchmarking results are promising, and show that Inferentia2 delivers superior latency performance when compared to both Inferentia and NVIDIA A10G GPUs. This latest breakthrough promises high-quality machine learning models can be made available to a much broader audience delivering AI accessibility to everyone.
blog/accelerate-transformers-with-inferentia2.md/0
{ "file_path": "blog/accelerate-transformers-with-inferentia2.md", "repo_id": "blog", "token_count": 2526 }
25
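To make the latency and throughput definitions from the post above concrete, here is a small, hardware-agnostic sketch that measures p95 latency for a Hugging Face pipeline. It is not the benchmark harness used for the published numbers (that lives in the linked repository); the model choice and request counts are arbitrary.

```python
import time

import numpy as np
from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
text = "Inferentia2 makes inference fast. " * 4

# Warmup, then time a fixed number of single-request predictions.
for _ in range(5):
    classifier(text)

latencies = []
for _ in range(50):
    start = time.perf_counter()
    classifier(text)
    latencies.append(time.perf_counter() - start)

p95_ms = np.percentile(latencies, 95) * 1000
throughput = len(latencies) / sum(latencies)
print(f"p95 latency: {p95_ms:.1f} ms, throughput: {throughput:.1f} requests/s")
```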
--- title: "Making automatic speech recognition work on large files with Wav2Vec2 in 🤗 Transformers" thumbnail: /blog/assets/49_asr_chunking/thumbnail.png authors: - user: Narsil --- # Making automatic speech recognition work on large files with Wav2Vec2 in 🤗 Transformers ``` Tl;dr: This post explains how to use the specificities of the Connectionist Temporal Classification (CTC) architecture in order to achieve very good quality automatic speech recognition (ASR) even on arbitrarily long files or during live inference. ``` **Wav2Vec2** is a popular pre-trained model for speech recognition. Released in [September 2020](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) by Meta AI Research, the novel architecture catalyzed progress in self-supervised pretraining for speech recognition, *e.g.* [*G. Ng et al.*, 2021](https://arxiv.org/pdf/2104.03416.pdf), [*Chen et al*, 2021](https://arxiv.org/abs/2110.13900), [*Hsu et al.*, 2021](https://arxiv.org/abs/2106.07447) and [*Babu et al.*, 2021](https://arxiv.org/abs/2111.09296). On the Hugging Face Hub, Wav2Vec2's most popular pre-trained checkpoint currently amounts to over [**250,000** monthly downloads](https://huggingface.co/facebook/wav2vec2-base-960h). **Wav2Vec2** is at its core a **transformers** models and one caveat of **transformers** is that it usually has a finite amount of sequence length it can handle. Either because it uses **position encodings** (not the case here) or simply because the cost of attention in transformers is actually O(n²) in sequence_length, meaning that using very large sequence_length explodes in complexity/memory. So you cannot run with finite hardware (even a very large GPU like A100), simply run Wav2Vec2 on an hour long file. Your program will crash. Let's try it ! ```bash pip install transformers ``` ```python from transformers import pipeline # This will work on any of the thousands of models at # https://huggingface.co/models?pipeline_tag=automatic-speech-recognition pipe = pipeline(model="facebook/wav2vec2-base-960h") # The Public Domain LibriVox file used for the test #!wget https://ia902600.us.archive.org/8/items/thecantervilleghostversion_2_1501_librivox/thecantervilleghostversion2_01_wilde_128kb.mp3 -o very_long_file.mp3 pipe("very_long_file.mp3") # Crash out of memory ! pipe("very_long_file.mp3", chunk_length_s=10) # This works and prints a very long string ! # This whole blogpost will explain how to make things work ``` Simple Chunking --------------- The simplest way to achieve inference on very long files would be to simply chunk the initial audio into shorter samples, let's say 10 seconds each, run inference on those, and end up with a final reconstruction. This is efficient computationally but usually leads to subpar results, the reason being that in order to do good inference, the model needs some context, so around the chunking border, inference tends to be of poor quality. Look at the following diagram: ![Simple chunking](./assets/49_asr_chunking/chunk.png) There are ways to try and work around the problem in a general fashion, but they are never entirely robust. You can try to chunk only when you encounter silence but you may have a non silent audio for a long time (a song, or noisy café audio). You can also try to cut only when there's no voice but it requires another model and this is not an entirely solved problem. You could also have a continous voice for a very long time. 
As it turns out, CTC structure, which is used by Wav2Vec2, can be exploited in order to achieve very robust speech recognition even on very long files without falling into those pitfalls. Chunking with stride -------------------- Wav2Vec2 uses the [CTC algorithm](https://distill.pub/2017/ctc/), which means that every frame of audio is mapped to a single letter prediction (logit). ![CTC](./assets/49_asr_chunking/CTC.png) That's the main feature we're going to use in order to add a `stride`. This [link](https://www.quora.com/What-does-stride-mean-in-the-context-of-convolutional-neural-networks) explains it in the image context, but it's the same concept for audio. Because of this property, we can: - Start doing inference on **overlapping** chunks so that the model actually has proper context in the center. - **Drop** the inferenced logits on the side. - Chain the **logits** without their dropped sides to recover something extremely close to what the model would have predicted on the full length audio. ![Striding](./assets/49_asr_chunking/Striding.png) This is not **technically** 100% the same thing as running the model on the whole file so it is not enabled by default, but as you saw in the earlier example you need only to add `chunk_length_s` to your `pipeline` for it to work. In practice, we observed that most of the bad inference is kept within the strides, which get dropped before inference, leading to a proper inference of the full text. Let's note that you can choose every argument of this technique: ```python from transformers import pipeline pipe = pipeline(model="facebook/wav2vec2-base-960h") # stride_length_s is a tuple of the left and right stride length. # With only 1 number, both sides get the same stride, by default # the stride_length on one side is 1/6th of the chunk_length_s output = pipe("very_long_file.mp3", chunk_length_s=10, stride_length_s=(4, 2)) ``` Chunking with stride on LM augmented models ------------------------------------------- In [transformers](https://github.com/huggingface/transformers), we also added support for adding LM to Wav2Vec2 in order to boost the WER performance of the models without even finetuning. [See this excellent blogpost explaining how it works](https://huggingface.co/blog/wav2vec2-with-ngram). It turns out, that the LM works directly on the logits themselves, so we can actually apply the exact same technique as before without any modification ! So chunking large files on these LM boosted models still works out of the box. Live inference -------------- A very nice perk of using a CTC model like Wav2vec2, is that it is a single pass model, so it is **very** fast. Especially on GPU. We can exploit that in order to do live inference. The principle is exactly the same as regular striding, but this time we can feed the pipeline data **as it is coming in** and simply use striding on full chunks of length 10s for instance with 1s striding to get proper context. That requires running much more inference steps than simple file chunking, but it can make the live experience much better because the model can print things as you are speaking, without having to wait for X seconds before seeing something displayed.
blog/asr-chunking.md/0
{ "file_path": "blog/asr-chunking.md", "repo_id": "blog", "token_count": 1890 }
26
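The following toy sketch illustrates the striding idea from the post above on plain tensors: run overlapping chunks, drop the logits that correspond to the left/right strides, and concatenate what remains. Real pipelines convert seconds to model frames and handle the edge chunks more carefully; those details are simplified away here.

```python
import torch


def stitch_logits(chunks, left, right):
    """chunks: list of (frames, vocab) logit tensors from overlapping windows.
    Drop `left` frames at the start and `right` frames at the end of every chunk
    (except the outer edges of the first/last chunk) before concatenating."""
    pieces = []
    for i, logits in enumerate(chunks):
        start = 0 if i == 0 else left
        end = logits.shape[0] if i == len(chunks) - 1 else logits.shape[0] - right
        pieces.append(logits[start:end])
    return torch.cat(pieces, dim=0)


# Three fake 100-frame chunks with a 10-frame stride dropped on each side.
chunks = [torch.randn(100, 32) for _ in range(3)]
merged = stitch_logits(chunks, left=10, right=10)
print(merged.shape)  # torch.Size([260, 32])
```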
--- title: "Optimization story: Bloom inference" thumbnail: /blog/assets/bloom-inference-pytorch-scripts/thumbnail.png authors: - user: Narsil --- # Optimization story: Bloom inference This article gives you the behind-the-scenes of how we made an efficient inference server that powers bloom. inference server that powers [https://huggingface.co/bigscience/bloom](). We achieved a 5x latency reduction over several weeks (and 50x more throughput). We wanted to share all the struggles and epic wins we went through to achieve such speed improvements. A lot of different people were involved at many stages so not everything will be covered here. And please bear with us, some of the content might be outdated or flat out wrong because we're still learning how to optimize extremely large models and lots of new hardware features and content keep coming out regularly. If your favorite flavor of optimizations is not discussed or improperly represented, we're sorry, please share it with us we're more than happy to try out new stuff and correct our mistakes. ## Creating BLOOM This goes without saying but without the large model being accessible in the first place, there would be no real reasons to optimize inference for it. This was an incredible effort led by many different people. To maximize the GPU during training, several solutions were explored and in the end, [Megatron-Deepspeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed) was chosen to train the end model. This meant that the code as-is wasn't necessarily compatible with the `transformers` library. ## Porting to transformers Because of the original training code, we set out to do something which we regularly do: port an existing model to `transformers`. The goal was to extract from the training code the relevant parts and implement it within `transformers`. This effort was tackled by [Younes](/ybelkada). This is by no means a small effort as it took almost a month and [200 commits](https://github.com/huggingface/transformers/pull/17474/commits) to get there. There are several things to note that will come back later: We needed to have smaller models [bigscience/bigscience-small-testing](https://huggingface.co/bigscience/bigscience-small-testing) and [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m). This is extremely important because they are smaller, so everything is faster when working with them. First, you have to abandon all hope to have exactly the same logits at the end down to the bytes. PyTorch versions can change the kernels and introduce subtle differences, and different hardware might yield different results because of different architecture (and you probably don't want to develop on a A100 GPU all the time for cost reasons). ***Getting a good strict test suite is really important for all models*** The best test we found was having a fixed set of prompts. You know the prompt, you know the completion that needs to be deterministic so greedy. If two generations are identical, you can basically ignore small logits differences Whenever you see a drift, you need to investigate. It could be that your code is not doing what it should OR that you are actually out of domain for that model and therefore the model is more sensitive to noise. If you have several prompts and long enough prompts, you're less likely to trigger that for all prompts by accident. The more prompts the better, the longer the better. 
The first model (small-testing) is in `bfloat16` like the big bloom so everything should be very similar, but it wasn't trained a lot or just doesn't perform well, so it highly fluctuates in outputs. That means we had issues with those generation tests. The second model is more stable but was trained and saved in `float16` instead of `bfloat16`. That's more room for error between the two. To be perfectly fair `bfloat16` -> `float16` conversion seemed to be OK in inference mode (`bfloat16` mostly exists to handle large gradients, which do not exist in inference). During that step, one important tradeoff was discovered and implemented. Because bloom was trained in a distributed setting, part of the code was doing Tensor parallelism on a Linear layer meaning running the same operation as a single operation on a single GPU was giving [different results](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L350). This took a while to pinpoint and either we went for 100% compliance and the model was much slower, or we would take a small difference in generation but was much faster to run and simpler code. We opted for a configurable flag. ## First inference (PP + Accelerate) ``` Note: Pipeline Parallelism (PP) means in this context that each GPU will own some layers so each GPU will work on a given chunk of data before handing it off to the next GPU. ``` Now we have a workable `transformers` clean version of the start working on running this. Bloom is a 352GB (176B parameters in bf16) model, we need at least that much GPU RAM to make it fit. We briefly explored offloading to CPU on smaller machines but the inference speed was orders of magnitude slower so we discarded it. Then we wanted to basically use the [pipeline](https://huggingface.co/docs/transformers/v4.22.2/en/pipeline_tutorial#pipeline-usage). So it's dogfooding and this is what the API uses under the hood all the time. However `pipelines` are not distributed aware (it's not their goal). After briefly discussing options, we ended up using [accelerate](https://github.com/huggingface/accelerate/) newly created `device_map="auto"` to manage the sharding of the model. We had to iron out a few bugs, and fix the `transformers` code a bit to help `accelerate` do the right job. It works by splitting the various layers of the transformers and giving part of the model to each GPU. So GPU0 gets to work, then hands it over to GPU1 so on and so forth. In the end, with a small HTTP server on top, we could start serving bloom (the big model) !! ## Starting point But we haven't even started discussing optimizations yet! We actually have quite a bit, all this process is a castle of cards. During optimizations we are going to make modifications to the underlying code, being extra sure you're not killing the model in one way or the other is really important and easier to do than you think. So we are now at the very first step of optimizations and we need to start measuring and keep measuring performance. So we need to consider what we care about. For an open inference server supporting many options, we expect users to send many queries with different parameters and what we care about are: The number of users we can serve at the same time (throughput) How long does it take for an average user to be served (latency)? 
We made a testing script in [locust](https://locust.io/) which is exactly this: ```python from locust import HttpUser, between, task from random import randrange, random class QuickstartUser(HttpUser): wait_time = between(1, 5) @task def bloom_small(self): sentence = "Translate to chinese. EN: I like soup. CN: " self.client.post( "/generate", json={ "inputs": sentence[: randrange(1, len(sentence))], "parameters": {"max_new_tokens": 20, "seed": random()}, }, ) @task def bloom_small(self): sentence = "Translate to chinese. EN: I like soup. CN: " self.client.post( "/generate", json={ "inputs": sentence[: randrange(1, len(sentence))], "parameters": { "max_new_tokens": 20, "do_sample": True, "top_p": 0.9, "seed": random(), }, }, ) ``` **Note: This is not the best nor the only load testing we used, but it was always the first to be run so that it could compare fairly across approaches. Being the best on this benchmark does NOT mean it is the best solution. Other more complex scenarios had to be used in addition to actual real-world performance. ** We wanted to observe the ramp-up for various implementations and also make sure that underload the server properly circuit breaked. Circuit breaking means that the server can answer (fast) that it will not answer your query because too many people are trying to use it at the same time. It's extremely important to avoid the hug of death. On this benchmark the initial performance was (on 16xA100 40Go on GCP which is the machine used throughout): Requests/s : 0.3 (throughput) Latency: 350ms/token (latency) Those numbers are not that great. Before getting to work let's estimate the best we can imagine achieving. The formula for amount of operations is `24Bsh^2 + 4𝐵s^2h24Bsh^2 + 4𝐵s^2h` where `B` is the batch size, `s` the sequence length, and `h` the hidden dimension. Let's do the math and we are getting `17 TFlop` for a single forward pass. Looking at the [specs](https://www.nvidia.com/en-us/data-center/a100/) of A100 it claims `312 TFLOPS` for a single card. That means a single GPU could potentially run at `17 / 312 = 54ms/token`. We're using 16 of those so `3ms/token` on the overall machine. Take all these numbers with a big grain of salt, it's never possible to reach those numbers, and real-life performance rarely matches the specs. Also if computation is not your limiting factor then this is not the lowest you can get. It's just good practice to know how far you are from your target. In this case, we're 2 orders of magnitude so pretty far. Also, this estimate puts all the flops at the service of latency which means only a single request can go at a time (it's ok since you're maximizing your machine so there's not much else to be done, but we can have higher latency and get throughput back through batching much more easily). ## Exploring many routes ``` Note: Tensor Parallelism (TP) means in this context that each GPU will own part of the weights, so ALL gpus are active all the time and do less work. Usually this comes with a very slight overhead that some work is duplicated and more importantly that the GPUs regularly have to communicate to each other their results to continue the computation ``` Now that we have a good understanding of where we stand it's time to get to work. We tried many different things based on the people and our various knowledge. ALL endeavors deserve their own blog post so I'll just list them, explain the few final learnings and delve into the details of only what went into the current server. 
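Before walking through those routes, it may help to make the earlier back-of-the-envelope estimate concrete. The worked version below evaluates the 24Bsh² + 4Bs²h per-layer formula in Python; BLOOM's rough shape (70 layers, hidden size 14336) and the 312 TFLOPS A100 peak are public figures, but treat the output as the same order-of-magnitude estimate the post describes, not a measurement.

```python
def forward_flops(batch_size: int, seq_len: int, hidden: int, n_layers: int) -> float:
    """Approximate FLOPs for one transformer forward pass: (24*B*s*h^2 + 4*B*s^2*h) per layer."""
    per_layer = 24 * batch_size * seq_len * hidden**2 + 4 * batch_size * seq_len**2 * hidden
    return per_layer * n_layers


# BLOOM-176B-ish shape, a single sequence of ~50 tokens -> roughly the 17 TFLOPs quoted above.
flops = forward_flops(batch_size=1, seq_len=50, hidden=14336, n_layers=70)
print(f"~{flops / 1e12:.1f} TFLOPs per forward pass")

a100_peak = 312e12  # claimed peak for one A100, from the spec sheet cited above
n_gpus = 16
ideal_seconds = flops / (a100_peak * n_gpus)
print(f"ideal latency on 16xA100: ~{ideal_seconds * 1e3:.1f} ms/token")
```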
Moving from Pipeline Parallelism (PP) to Tensor Parallelism (TP) is one big interesting change for latency. Each GPU will own part of the parameters and all will be working at the same time. So the latency should decrease drastically but the price to pay is the communication overhead since they regularly need to communicate with each other about their results. It is to note that this is a very wide range of approaches and the intent was deliberately to learn more about each tool and how it could fit in later endeavors. ### Porting the code the JAX/Flax to run on TPUs: - Expected to be easier to choose the type of parallelism. so TP should be easier to test. It's one of the perks of Jax's design. - More constrained on hardware, performance on TPU likely superior than GPU, and less vendor choice for TPU. - Cons, another port is needed. But it would be welcome anyway in our libs. Results: - Porting was not an easy task as some conditions and kernels were hard to reproduce correctly enough. Still manageable though. - Parallelism was quite easy to get once ported Kudos to Jax the claim is alive. - Ray/communicating with TPU workers proved to be a real pain for us. We don't know if its the tool, the network, or simply our lack of knowledge but it slowed down experiments and work much more than we anticipated. We would launch an experiment that takes 5mn to run, wait for 5mn nothing had happened, 10mn later still nothing, turned out some worker was down/not responding we had to manually get in, figure out what went on, fix it, restart something, and relaunch and we had just lost half an hour. Repeat that enough times, and lost days add up quickly. Let's emphasize that it's not necessarily a critique of the tools we used but the subjective experience we had remains. - No control over compilation Once we had the thing running, we tried several settings to figure out which suited best the inference we had in mind, and it turned out it was really hard to guess from settings what would happen in the latency/throughput. For instance, we had a 0.3 rps on batch_size=1 (so every request/user is on its own) with a latency of 15ms/token (Do not compare too much with other numbers in this article it's on a different machine with a very different profile) which is great, but the overall throughput is not much better than what we had with the old code. So we decided to add batching, and with BS=2 and the latency went up 5 fold, with only 2 times the throughput... Upon further investigation, it turned out that up to batch_size=16 every batch_size had the same latency profile. So we could have 16x more throughput at a 5x latency cost. Not bad, but looking at the numbers we really would have preferred a more fine-grained control. The numbers we were aiming for stem from the [100ms, 1s, 10s, 1mn](https://www.nngroup.com/articles/response-times-3-important-limits/) rule. ### Using ONNX/TRT or other compiled approaches - They are supposed to handle most of the optimization work - Con, Usually parallelism needs to be handled manually. Results: - Turned out that to be able to trace/jit/export stuff we needed to rework part of the PyTorch, so it easily fused with the pure PyTorch approach And overall we figured out that we could have most of the optimizations we desired by staying within PyTorch world, enabling us to keep flexibility without having to make too much coding effort. 
Another thing to note, since we're running on GPU and text-generation has many forward passes going on, we need the tensors to stay on the GPU, and it is sometimes hard to send your tensors to some lib, be given back the result, perform the logits computation (like argmax or sampling) and feed it back again. Putting the loop within the external lib means losing flexibility just like Jax, so it was not envisioned in our use case. ### DeepSpeed - This is the technology that powered training, it seemed only fair to use it for inference - Cons, it was never used/prepared for inference before. Results: - We had really impressive results fast which are roughly the same as the last iteration we are currently running. - We had to invent a way to put a webserver (so dealing with concurrency) on top of DeepSpeed which also has several processes (one for each GPU). Since there is an excellent library [Mii](https://github.com/microsoft/DeepSpeed-MII). It doesn't fit the extremely flexible goals we had in mind, but we probably would have started working on top of it now. (The current solution is discussed later). - The biggest caveat we encountered with DeepSpeed, was the lack of stability. We had issues when running it on CUDA 11.4 where the code was built for 11.6 And the long-standing issue we could never really fix is that there would be regular kernel crashes (Cuda illegal access, dimensions mismatch, etc..). We fixed a bunch of these but we could never quite achieve stability under stress of our webserver. Despite, that I want to shout out to the Microsoft folks that helped us, we had a really good conversation that improved our understanding of what was happening, and gave us real insights to do some follow-up works. - One of the pain points I feel is that our team is mostly in Europe, while Microsoft is in California, so the collaboration was tricky timewise and we probably lost a big chunk of time because of it. This has nothing to do with the technical part, but it's good to acknowledge that the organizational part of working together is also really important. - Another thing to note, is that DeepSpeed relies on `transformers` to inject its optimization, and since we were updating our code pretty much consistently it made it hard for the DeepSpeed team to keep things working on our `main` branch. We're sorry to have made it hard, I guess this is why it's called bleeding edge. ### Webserver ideas - Given that we are going to run a free server where users are going to send long text, short text, want a few tokens, or a whole recipe each with different parameters, something had to be done here. Results: - We recoded everything in `Rust` with the excellent bindings [tch-rs](https://github.com/LaurentMazare/tch-rs). Rust was not aimed at having performance gains but just much more fine-grained control over parallelism (threads/processes) and playing more fine-grained on the webserver concurrency and the PyTorch one. Python is infamously hard to handle low-level details thanks to the [GIL](https://realpython.com/python-gil/). - Turned out that most of the pain came from the port, and after that, the experimentation was a breeze. And we figured that with enough control over the loops we could have great performance for everyone even in the context of a very wide array of requests with different properties. [Code](https://github.com/Narsil/bloomserver) for the curious, but it doesn't come with any support or nice docs. 
- It became production for a few weeks because it was more lenient on the parallelism, we could use the GPUs more efficiently (using GPU0 for request 1 while GPU1 is treating request 0). and we went from 0.3 RPS to ~2.5 RPS with the same latency. The optimal case would have been to increase throughput by 16X but the numbers shown here are real workloads measurements so this is not too bad. ### Pure PyTorch - Purely modify the existing code to make it faster by removing operations like `reshape`, using better-optimized kernels so on and so forth. - Con, we have to code TP ourselves and we have a constraint that the code still fits our library (mostly). Results - Next chapter. ## Final route: PyTorch + TP + 1 custom kernel + torch.jit.script ### Writing more efficient PyTorch The first item on the list was removing unnecessary operations in the first implementations Some can be seen by just looking at the code and figuring out obvious flaws: - Alibi is used in Bloom to add position embeddings and it was calculated in too many places, we could only calculate it once and more efficiently. The old code: [link](https://github.com/huggingface/transformers/blob/ca2a55e9dfb245527b5e1c954fec6ffbb7aef07b/src/transformers/models/bloom/modeling_bloom.py#L94-L132) The new code: [link](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L86-L127) This is a 10x speedup and the latest version includes padding too! Since this step is only computed once, the actual speed is not important but overall reducing the number of operations and tensor creation is a good direction. Other parts come out more clearly when you start [profiling](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html) and we used quite extensively the [tensorboard extension](https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html) This provides this sort of image which give insights: <img src="assets/bloom-inference-optimization/profiler_simple.png"> Attention takes a lot of time, careful this is a CPU view so the long bars don't mean long, they mean the CPU is awaiting the GPU results of the previous step. <img src="assets/bloom-inference-optimization/profiler.png"> We see many `cat` operations before `baddbmm`. Removing a lot of reshape/transpose, for instance, we figured out that: - The attention is the hot path (it's expected but always good to verify). - In the attention, a lot of kernels were actual copies due to the massive amount of reshapes - We **could** remove the reshapes by reworking the weights themselves and the past. This is a breaking change but it did improve performance quite a bit! ### Supporting TP Ok, we have removed most of the low-hanging fruits now we went roughly from 350ms/token latency to 300ms/token in PP. That's a 15% reduction in latency, but it actually provided more than that, but we were not extremely rigorous in our measuring initially so let's stick to that figure. Then we went on to provide a TP implementation. Turned out to be much faster than we anticipated the implementation took half a day of a single (experienced) dev. The result is [here](https://github.com/huggingface/transformers/tree/thomas/dirty_bloom_tp/src/transformers/models/bloom). We were also able to reuse code from other projects which helped. The latency went directly from 300ms/token to 91ms/token which is a huge improvement in user experience. A simple 20 tokens request went from 6s to 2s which went from a "slow" experience to slightly delayed. 
### Low-hanging fruits

Now that we had a TP implementation, we could start profiling and optimizing again. It's a significant enough shift that we had to start from scratch.

The first thing that stood out is that synchronization (`ncclAllReduce`) starts to become a preponderant part of the load, which is expected: this is the synchronization step and it **is** taking some time. We never tried to optimize it, since it already uses `nccl`, but there might still be some room for improvement there; we assumed it would be hard to do much better.

The second thing is that the `Gelu` operator was launching many elementwise kernels, and overall it was taking a bigger share of compute than we expected.

We made the change from:

```python
def bloom_gelu_forward(x):
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
```

to

```python
@torch.jit.script
def bloom_gelu_forward(x):
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
```

This transforms the operation from multiple small element-wise kernels (and hence tensor copies) into a single kernel launch!

This provided a 10% latency improvement, from 91ms/token to 81ms/token, right there!

Be careful though: this is not some magic black box you can just throw everywhere; the kernel fusion will not necessarily happen, or the previously used operations may already be extremely efficient.

Places where we found it worked well:

- You have a lot of small/elementwise operations.
- You have a hotspot with a few hard-to-remove reshapes or copies in general.
- When the fusion actually happens.

### Epic fail

We also had some points, during our testing periods, where we ended up seeing a consistent 25% lower latency for the Rust server compared to the Python one. This was rather odd, but because it was consistently measured, and because removing kernels had provided a speedup, we were under the impression that maybe dropping the Python overhead could provide a nice boost.

We started a 3-day job to reimplement the necessary parts of `torch.distributed` to get up and running in the Rust world: [nccl-rs](https://github.com/Narsil/nccl-rs). We had the version working, but something was off in the generations compared to its Python counterpart. During the investigation of the issues, we figured out... **that we had forgotten to remove the profiler in the PyTorch measurements**...

That was the epic fail, because removing it gave us back the 25%, and then both codebases ran just as fast. This is what we initially expected: Python shouldn't be a performance hit, since it's mostly running torch's C++ code. In the end, 3 days is not the end of the world, and it might become useful sometime in the future, but it was still pretty bad.

It is quite common when doing optimizations to make wrong or misrepresentative measurements, which end up being disappointing or even detrimental to the overall product. This is why doing it in small steps and having expectations about the outcome as soon as possible helps contain that risk.

Another place where we had to be extra careful was the initial forward pass (without past) versus the later forward passes (with past). If you optimize the first one, you're most certainly going to slow down the later ones, which are much more important and account for most of the runtime.
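Keeping those two regimes honest means timing them separately, and timing them on the GPU clock. The snippet below is a minimal sketch of that kind of measurement — the `model` and `prompt_ids` objects are assumed to already exist on the GPU, and this is not our actual benchmarking harness:

```python
import time
import torch

def time_forward(model, input_ids, past_key_values=None, n_runs=50):
    """Rough per-call latency in milliseconds, waiting for CUDA kernels to finish."""
    with torch.no_grad():
        # Warmup to avoid measuring one-time costs (allocations, autotuning, ...).
        for _ in range(5):
            out = model(input_ids=input_ids, past_key_values=past_key_values, use_cache=True)
        torch.cuda.synchronize()
        start = time.perf_counter()
        for _ in range(n_runs):
            out = model(input_ids=input_ids, past_key_values=past_key_values, use_cache=True)
        torch.cuda.synchronize()
    return (time.perf_counter() - start) / n_runs * 1000, out.past_key_values

# `model` and `prompt_ids` are assumed to exist and live on the GPU.
first_ms, past = time_forward(model, prompt_ids)  # initial pass, no past
next_ms, _ = time_forward(model, prompt_ids[:, -1:], past_key_values=past)  # later passes
print(f"first forward: {first_ms:.1f} ms, per-token forward: {next_ms:.1f} ms")
```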
Another pretty common culprit is measuring CPU times rather than actual CUDA times — hence the `torch.cuda.synchronize()` calls in the sketch above, which make sure the kernels have completed before you stop the clock.

### Custom kernel

So far, we had achieved close to DeepSpeed performance without any custom code outside of PyTorch! Pretty neat. We also didn't have to make any compromise on the flexibility of the runtime batch size!

But given the DeepSpeed experience, we wanted to try to write a custom kernel to fuse a few operations in the hot path where `torch.jit.script` wasn't able to do it for us. Essentially the following two lines:

```python
attn_weights = attention_scores.masked_fill_(attention_mask, torch.finfo(attention_scores.dtype).min)
attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype)
```

The first masked fill creates a new tensor whose only purpose is to tell the softmax operator to ignore those values. Also, the softmax needs to be calculated in float32 (for stability), but within a custom kernel we could limit the amount of upcasting necessary, restricting it to the actual sums and accumulations needed.

The code can be found [here](https://github.com/huggingface/transformers/blob/thomas/add_custom_kernels/src/transformers/models/bloom/custom_kernels/fused_bloom_attention_cuda.cu).

Keep in mind we had a single GPU architecture to target, so we could focus on it, and we are not experts (yet) at writing kernels, so there could be better ways to do this.

This custom kernel provided yet another 10% latency improvement, moving down from 81ms/token to 71ms/token — all the while keeping our flexibility.

After that, we investigated and explored other things, like fusing more operators, removing other reshapes, or putting them in other places. But no attempt ever made a significant enough impact to make it into the final version.

### Webserver part

Just like the Rust counterpart, we had to implement the batching of requests with different parameters. Since we were in the `PyTorch` world, we had pretty much full control of what was going on. Since we're in Python, we have the limiting factor that `torch.distributed` needs to run on several processes instead of threads, which means it's slightly harder to communicate between processes. In the end, we opted to communicate raw strings over a Redis pub/sub to distribute the requests to all processes at once. Since we are in different processes, it's easier to do it that way than to communicate tensors (which are way bigger), for instance.

Then we had to drop the use of [generate](https://huggingface.co/docs/transformers/v4.22.2/en/main_classes/text_generation#transformers.generation_utils.GenerationMixin.generate), since it applies the same parameters to all members of the batch, and we actually want to apply a different set of parameters to each. Thankfully, we can reuse lower-level items like the [LogitsProcessor](https://huggingface.co/docs/transformers/internal/generation_utils#transformers.LogitsProcessor) to save us a lot of work. So we reconstructed a `generate` function that takes a list of parameters and applies each set to the corresponding member of the batch, roughly as sketched below.
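A heavily simplified sketch of that idea — not the actual server code; the per-request parameter dictionaries and helper names here are hypothetical — applying an independent `LogitsProcessorList` to each row of the batch:

```python
import torch
from transformers import LogitsProcessorList, TemperatureLogitsWarper, TopKLogitsWarper

def build_processors(params):
    # One processor list per request: each request carries its own sampling parameters.
    return LogitsProcessorList(
        [TemperatureLogitsWarper(params["temperature"]), TopKLogitsWarper(params["top_k"])]
    )

def generate_step(model, input_ids, past_key_values, per_request_params):
    out = model(input_ids=input_ids, past_key_values=past_key_values, use_cache=True)
    logits = out.logits[:, -1, :]
    next_tokens = []
    for i, params in enumerate(per_request_params):
        # In practice the processor lists would be built once per request, not at every step.
        processors = build_processors(params)
        row_scores = processors(input_ids[i : i + 1], logits[i : i + 1])
        probs = torch.softmax(row_scores, dim=-1)
        next_tokens.append(torch.multinomial(probs, num_samples=1))
    return torch.cat(next_tokens, dim=0), out.past_key_values
```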
Another really important aspect of the final UX is latency. Since we have different parameter sets for different requests, we might get one request for 20 tokens and another for 250 tokens. At 75ms/token of latency, one request takes 1.5s and the other 18s. If we were batching all the way, we would make the user who asked for 20 tokens wait the full 18s, making it appear to them as if we were running at 900ms/token, which is quite slow!

Since we're in a PyTorch world with extreme flexibility, what we can do instead is extract the first request from the batch as soon as we have generated its first 20 tokens, and return to that user within the requested 1.5s! We also happen to save 230 tokens' worth of computation.

So flexibility **is** important to get the best possible latency out there.

## Last notes and crazy ideas

Optimization is a never-ending job, and like any other project, 20% of the work will usually yield 80% of the results. At some point, we started having a small testing strategy to figure out the potential yield of an idea, and if the tests didn't show significant results, we discarded it. 1 day for a 10% improvement is valuable enough, 2 weeks for 10x is valuable enough. 2 weeks for 10% is not so interesting.

### Have you tried ...?

Stuff we know exists but haven't used, for various reasons: it felt like it wasn't adapted to our use case, it was too much work, the yields weren't promising enough, or simply we had too many options to try out and discarded some for no particular reason other than lack of time. The following are in no particular order:

- [Cuda graphs](https://developer.nvidia.com/blog/cuda-graphs/)
- [nvFuser](https://pytorch.org/tutorials/intermediate/nvfuser_intro_tutorial.html) (this is what powers `torch.jit.script`, so we did use it)
- [FasterTransformer](https://github.com/NVIDIA/FasterTransformer)
- [Nvidia's Triton](https://developer.nvidia.com/nvidia-triton-inference-server)
- [XLA](https://www.tensorflow.org/xla) (Jax uses XLA too!)
- [torch.fx](https://pytorch.org/docs/stable/fx.html)
- [TensorRT](https://developer.nvidia.com/blog/accelerating-inference-up-to-6x-faster-in-pytorch-with-torch-tensorrt/)

Please feel free to reach out if your favorite tool is missing from here or if you think we missed out on something important that could prove useful!

### [Flash attention](https://github.com/HazyResearch/flash-attention)

We briefly looked at integrating flash attention: while it performs extremely well on the first forward pass (without `past_key_values`), it didn't yield as big improvements when running with `past_key_values`. Since we needed to adapt it to include the `alibi` tensor in the calculation, we decided not to do the work (at least not yet).

### [OpenAI Triton](https://openai.com/blog/triton/)

[Triton](https://github.com/openai/triton) is a great framework for building custom kernels in Python. We want to use it more, but we haven't so far. We would be eager to see if it performs better than our Cuda kernel. Writing directly in Cuda seemed like the shortest path to our goal when we considered our options for that part.

### Padding and Reshapes

As mentioned throughout this article, every tensor copy has a cost, and another hidden cost of running in production is padding. When two queries come in with very different lengths, you have to pad (use dummy tokens) to make them fit a square shape, which can lead to a lot of unnecessary calculation. [More information](https://huggingface.co/docs/transformers/v4.22.2/en/main_classes/pipelines#pipeline-batching).

Ideally, we would be able to *not* do those calculations at all, and never have reshapes. TensorFlow has the concept of [RaggedTensor](https://www.tensorflow.org/guide/ragged_tensor) and PyTorch has [Nested tensors](https://pytorch.org/docs/stable/nested.html). Both seem less streamlined than regular tensors, but they might enable us to do less computation, which is always a win.
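As a rough illustration of why this matters — nested tensors were still a prototype feature in PyTorch at the time of writing, so the exact API may differ depending on your version; treat this as a sketch:

```python
import torch

# Two "requests" of very different lengths.
short = torch.randn(20, 1024)   # 20 tokens of hidden states
long = torch.randn(250, 1024)   # 250 tokens of hidden states

# Padded batching: the short request pays for 250 positions anyway.
padded = torch.zeros(2, 250, 1024)
padded[0, :20] = short
padded[1] = long

# Nested tensors keep each sequence at its own length, so no dummy positions.
nested = torch.nested.nested_tensor([short, long])

print(padded.numel())                                # 512000 elements
print(sum(t.numel() for t in nested.unbind()))       # 276480 elements
```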
In an ideal world, the entire inference would be written in CUDA or as a pure GPU implementation. Considering the performance improvements we got whenever we could fuse operations, that looks desirable. But to what extent this would deliver, we have no idea. If smarter GPU people have ideas, we are listening!

## Acknowledgments

All this work is the result of the collaboration of many HF team members. In no particular order,
[@ThomasWang](https://huggingface.co/TimeRobber)
[@stas](https://huggingface.co/stas)
[@Nouamane](https://huggingface.co/nouamanetazi)
[@Suraj](https://huggingface.co/valhalla)
[@Sanchit](https://huggingface.co/sanchit-gandhi)
[@Patrick](https://huggingface.co/patrickvonplaten)
[@Younes](/ybelkada)
[@Sylvain](https://huggingface.co/sgugger)
[@Jeff (Microsoft)](https://github.com/jeffra)
[@Reza](https://github.com/RezaYazdaniAminabadi)
And all of the [BigScience](https://huggingface.co/bigscience) organization.
blog/bloom-inference-optimization.md/0
{ "file_path": "blog/bloom-inference-optimization.md", "repo_id": "blog", "token_count": 8879 }
27
--- title: "Deep Learning over the Internet: Training Language Models Collaboratively" thumbnail: /blog/assets/24_sahajBERT/thumbnail.png authors: - user: mryab guest: true - user: SaulLu --- # Deep Learning over the Internet: Training Language Models Collaboratively <small> With the additional help of Quentin Lhoest and Sylvain Lesage. </small> Modern language models often require a significant amount of compute for pretraining, making it impossible to obtain them without access to tens and hundreds of GPUs or TPUs. Though in theory it might be possible to combine the resources of multiple individuals, in practice, such distributed training methods have previously seen limited success because connection speeds over the Internet are way slower than in high-performance GPU supercomputers. In this blog post, we describe [DeDLOC](https://arxiv.org/abs/2106.10207) — a new method for collaborative distributed training that can adapt itself to the network and hardware constraints of participants. We show that it can be successfully applied in real-world scenarios by pretraining [sahajBERT](https://huggingface.co/neuropark/sahajBERT), a model for the Bengali language, with 40 volunteers. On downstream tasks in Bengali, this model achieves nearly state-of-the-art quality with results comparable to much larger models that used hundreds of high-tier accelerators. <div class="aspect-w-16 aspect-h-9"> <iframe src="https://www.youtube.com/embed/v8ShbLasRF8" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> </div> ## Distributed Deep Learning in Open Collaborations ### Why should we do it? These days, many highest-quality NLP systems are based on large pretrained Transformers. In general, their quality improves with size: you can achieve unparalleled results in natural language understanding and generation by scaling up the parameter count and leveraging the abundance of unlabeled text data. Unfortunately, we use these pretrained models not only because it's convenient. The hardware resources for training Transformers on large datasets often exceed anything affordable to a single person and even most commercial or research organizations. Take, for example, BERT: its training was estimated to cost about $7,000, and for the largest models like GPT-3, this number can be as high as $12 million! This resource limitation might seem obvious and inevitable, but is there really no alternative to using pretrained models for the broader ML community? However, there might be a way out of this situation: to come up with a solution, we only need to take a look around. It might be the case that the computational resources we're looking for are already there; for example, many of us have powerful computers with gaming or workstation GPUs at home. You might've already guessed that we're going to join their power similarly to [Folding@home](https://foldingathome.org/), [Rosetta@home](https://boinc.bakerlab.org/), [Leela Chess Zero](https://lczero.org/) or different [BOINC](https://boinc.berkeley.edu/) projects that leverage volunteer computing, but the approach is even more general. For instance, several laboratories can join their smaller clusters to utilize all the available resources, and some might want to join the experiment using inexpensive cloud instances. 
To a skeptical mind, it might seem that we're missing a key factor here: data transfer in distributed DL is often a bottleneck, since we need to aggregate the gradients from multiple workers. Indeed, any naïve approach to distributed training over the Internet is bound to fail, as most participants don't have gigabit connections and might disconnect from the network at any time. So how on Earth can you train anything with a household data plan? :) As a solution to this problem, we propose a new training algorithm, called Distributed Deep Learning in Open Collaborations (or **DeDLOC**), which is described in detail in our recently released [preprint](https://arxiv.org/abs/2106.10207). Now, let’s find out what are the core ideas behind this algorithm! ### Training with volunteers In its most frequently used version, distributed training with multiple GPUs is pretty straightforward. Recall that when doing deep learning, you usually compute gradients of your loss function averaged across many examples in a batch of training data. In case of _data-parallel_ distributed DL, you simply split the data across multiple workers, compute gradients separately, and then average them once the local batches are processed. When the average gradient is computed on all workers, we adjust the model weights with the optimizer and continue training our model. You can see an illustration of different tasks that are executed below. ![assets/24_sahajBERT/roles_tasks.png](assets/24_sahajBERT/roles_tasks.png) <div style="line-height:105%;font-size:80%"> <p align="center"> Typical machine learning tasks executed by peers in distributed training, possibly with a separation of roles </p> </div> Often, to reduce the amount of synchronization and to stabilize the learning process, we can accumulate the gradients for N batches before averaging, which is equivalent to increasing the actual batch size N times. This approach, combined with the observation that most state-of-the-art language models use large batches, led us to a simple idea: let's accumulate one _very_ large batch across all volunteer devices before each optimizer step! Along with complete equivalence to regular distributed training and easy scalability, this method also has the benefit of built-in fault tolerance, which we illustrate below. Let's consider a couple of potential failure cases that we might encounter throughout a collaborative experiment. By far, the most frequent scenario is that one or several peers disconnect from the training procedure: they might have an unstable connection or simply want to use their GPUs for something else. In this case, we only suffer a minor setback of training: the contribution of these peers gets deducted from the currently accumulated batch size, but other participants will compensate for that with their gradients. Also, if more peers join, the target batch size will simply be reached faster, and our training procedure will naturally speed up. You can see a demonstration of this in the video: <div class="aspect-w-16 aspect-h-9"> <iframe src="https://www.youtube.com/embed/zdVsg5zsGdc" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> </div> ### Adaptive averaging Now that we have discussed the overall training procedure, there remains one more question: how do we actually aggregate the gradients of participants? 
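Before answering that question, here is a rough sketch of the per-peer loop described above, written with plain `torch.distributed` rather than the actual DeDLOC/hivemind code; the `all_reduce` call at the end is precisely the aggregation step the rest of this section is about:

```python
import torch
import torch.distributed as dist

def collaborative_step(model, optimizer, data_iterator, accumulation_steps):
    """Accumulate gradients over several local batches, then average across peers.

    A plain torch.distributed sketch — not the DeDLOC implementation, which replaces
    the naive all_reduce below with an adaptive, fault-tolerant averaging procedure.
    """
    model.zero_grad()
    for _ in range(accumulation_steps):
        batch = next(data_iterator)
        loss = model(**batch).loss / accumulation_steps
        loss.backward()  # gradients keep accumulating in param.grad
    for param in model.parameters():
        if param.grad is not None:
            dist.all_reduce(param.grad, op=dist.ReduceOp.SUM)
            param.grad /= dist.get_world_size()
    optimizer.step()
```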
Most home computers cannot easily accept incoming connections, and the download speed might also become a constraint. Since we rely on volunteer hardware for experiments, a central server is not really a viable option, as it will quickly face overload when scaling to tens of clients and hundreds of millions of parameters. Most data-parallel training runs today don't use this strategy anyway; instead, they rely on All-Reduce — an efficient all-to-all communication primitive. Thanks to clever algorithmic optimizations, each node can compute the global average without sending the entire local gradient to every peer. Because All-Reduce is decentralized, it seems like a good choice; however, we still need to take the diversity of hardware and network setups into account. For example, some volunteers might join from computers that have slow network but powerful GPUs, some might have better connectivity only to a subset of other peers, and some may be firewalled from incoming connections. It turns out we can actually come up with an optimal data transfer strategy on the fly by leveraging this information about performance! On a high level, we split the entire gradient vector into parts depending on the Internet speed of each peer: those with the fastest connection aggregate the largest parts. Also, if some nodes do not accept incoming connections, they simply send their data for aggregation but do not compute the average themselves. Depending on the conditions, this adaptive algorithm can recover well-known distributed DL algorithms and improve on them with a hybrid strategy, as demonstrated below. ![Adaptative strategy](assets/24_sahajBERT/adaptive.png) <div style="line-height:105%;font-size:80%"> <p align="center"> Examples of different averaging strategies with the adaptive algorithm. </p> </div> <div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black"> <p align="center"> 💡 The core techniques for decentralized training are available in <a href="https://github.com/learning-at-home/hivemind">Hivemind</a>.<br> Check out the repo and learn how to use this library in your own projects! </p> </div><br> ## sahajBERT As always, having a well-designed algorithmic framework doesn't mean that it will work as intended in practice, because some assumptions may not hold true in actual training runs. To verify the competitive performance of this technology and to showcase its potential, we organized a special collaborative event to pretrain a masked language model for the Bengali language. Even though it is the fifth most spoken native language in the world, it has [very few](https://huggingface.co/models?filter=bn&pipeline_tag=fill-mask) masked language models openly available, which emphasizes the importance of tools that can empower the community, unlocking a plethora of opportunities in the field. We conducted this experiment with real volunteers from the Neuropark community and used openly available datasets (OSCAR and Wikipedia), because we wanted to have a fully reproducible example that might serve as an inspiration for other groups. Below, we describe the detailed setup of our training run and demonstrate its results. ### Architecture For our experiment, we chose ALBERT _(A Lite BERT)_ — a model for language representations that is pretrained with Masked Language Modeling (MLM) and Sentence Order Prediction (SOP) as objectives. 
We use this architecture because weight sharing makes it very parameter-efficient: for example, ALBERT-large has ~18M trainable parameters and performs comparably to BERT-base with ~108M weights on the GLUE benchmark. It means that there is less data to exchange between the peers, which is crucial in our setup, as it significantly speeds up each training iteration. <div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black"> <p align="center"> 💡 Want to know more about ALBERT?<br> <a href="https://arxiv.org/abs/1909.11942">Paper</a><br> <a href="https://huggingface.co/transformers/model_doc/albert.html#albert" >Transformers doc</a > </p> </div> ### Tokenizer The first brick of our model is called a _tokenizer_ and takes care of transforming raw text into vocabulary indices. Because we are training a model for Bengali, which is not very similar to English, we need to implement language-specific preprocessing as a part of our tokenizer. We can view it as a sequence of operations: 1. **Normalization:** includes all preprocessing operations on raw text data. This was the step at which we have made the most changes, because removing certain details can either change the meaning of the text or leave it the same, depending on the language. For example, the standard ALBERT normalizer removes the accents, while for the Bengali language, we need to keep them, because they contain information about the vowels. As a result, we use the following operations: NMT normalization, NFKC normalization, removal of multiple spaces, homogenization of recurring Unicode characters in the Bengali language, and lowercasing. 2. **Pretokenization** describes rules for splitting the input (for example, by whitespace) to enforce specific token boundaries. As in the original work, we have chosen to keep the whitespace out of the tokens. Therefore, to distinguish the words from each other and not to have multiple single-space tokens, each token corresponding to the beginning of a word starts with a special character “\_” (U+2581). In addition, we isolated all punctuation and digits from other characters to condense our vocabulary. 3. **Tokenizer modeling:** It is at this level that the text is mapped into a sequence of elements of a vocabulary. There are several algorithms for this, such as Byte-Pair Encoding (BPE) or Unigram, and most of them need to build the vocabulary from a text corpus. Following the setup of ALBERT, we used the **Unigram Language Model** approach, training a vocabulary of 32k tokens on the deduplicated Bengali part of the OSCAR dataset. 4. **Post-processing:** After tokenization, we might want to add several special tokens required by the architecture, such as starting the sequence with a special token `[CLS]` or separating two segments with a special token `[SEP]`. Since our main architecture is the same as the original ALBERT, we keep the same post-processing: specifically, we add a `[CLS]` token at the beginning of each example and a `[SEP]` token both between two segments and at the end. 
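Putting those four steps together with the 🤗 `tokenizers` library looks roughly like the sketch below. It is a simplified approximation rather than the exact sahajBERT recipe: the `text_iterator`, the special-token set and the Bengali-specific normalization rules are stand-ins.

```python
from tokenizers import Regex, Tokenizer, models, normalizers, pre_tokenizers, processors, trainers

tokenizer = Tokenizer(models.Unigram())

# 1. Normalization: NMT + NFKC + whitespace cleanup + lowercasing
#    (the Bengali-specific Unicode homogenization is omitted here).
tokenizer.normalizer = normalizers.Sequence(
    [normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(" {2,}"), " "), normalizers.Lowercase()]
)

# 2. Pre-tokenization: whitespace stays out of tokens (word starts marked with "▁"),
#    punctuation and digits are isolated.
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
    [pre_tokenizers.Metaspace(), pre_tokenizers.Punctuation(), pre_tokenizers.Digits(individual_digits=True)]
)

# 3. Model: train a 32k-token Unigram vocabulary on raw Bengali text.
trainer = trainers.UnigramTrainer(
    vocab_size=32_000,
    special_tokens=["[CLS]", "[SEP]", "[UNK]", "[PAD]", "[MASK]"],
    unk_token="[UNK]",
)
tokenizer.train_from_iterator(text_iterator, trainer=trainer)  # `text_iterator` yields raw text

# 4. Post-processing: [CLS] at the start, [SEP] between segments and at the end.
tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", tokenizer.token_to_id("[CLS]")), ("[SEP]", tokenizer.token_to_id("[SEP]"))],
)
```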
<div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black"> <p align="center"> 💡 Read more information about each component in <a href="https://huggingface.co/docs/tokenizers/python/latest/components.html#components">Tokenizers doc</a> </p> </div> You can reuse our tokenizer by running the following code: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("neuropark/sahajBERT") ``` ### Dataset The last thing we need to cover is the training dataset. As you probably know, the great strength of pretrained models like BERT or ALBERT is that you don't need an annotated dataset, but just a lot of texts. To train sahajBERT, we used the [Bengali Wikipedia dump from 03/20/2021](https://huggingface.co/datasets/lhoestq/wikipedia_bn) and the Bengali subset of [OSCAR](https://huggingface.co/datasets/oscar) (600MB + 6GB of text). These two datasets can easily be downloaded from the HF Hub. However, loading an entire dataset requires time and storage — two things that our peers do not necessarily have. To make the most of the resources provided by the participants, we have implemented **dataset streaming**, which allows them to train the model nearly as soon as they join the network. Specifically, the examples in the dataset are downloaded and transformed in parallel to the training. We can also shuffle the dataset so that our peers have little chance to process the same examples at the same time. As the dataset is not downloaded and preprocessed in advance, the transformations needed to go from plain text to a training example (shown in the figure below) are done on the fly. ![Create dataset](assets/24_sahajBERT/create_dataset.png) <div style="line-height:105%;font-size:80%"> <p align="center"> From a raw sample to a training sample </p> </div> The dataset streaming mode is available from version v1.9 of the 🤗 datasets library, so you can use it right now as follows: ```python from datasets import load_dataset oscar_dataset = load_dataset("oscar", name="unshuffled_deduplicated_bn", streaming=True) ``` <div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black"> <p align="center"> 💡 Learn more about loading datasets in streaming mode in the <a href="https://huggingface.co/docs/datasets/dataset_streaming.html">documentation</a> </p> </div> ### Collaborative event The sahajBERT collaborative training event took place from May 12 to May 21. The event brought together 40 participants, 30 of whom were Bengali-speaking volunteers, and 10 were volunteers from one of the authors' organizations. These 40 volunteers joined the [Neuropark](https://neuropark.co/) Discord channel to receive all information regarding the event and participate in discussions. To join the experiment, volunteers were asked to: 1. Send their username to the moderators to be allowlisted; 2. Open the provided notebook locally, on Google Colaboratory, or on Kaggle; 3. Run one code cell and fill in their Hugging Face credentials when requested; 4. Watch the training loss decrease on the shared dashboards! For security purposes, we set up an authorization system so that only members of the Neuropark community could train the model. Sparing you the technical details, our authorization protocol allows us to guarantee that every participant is in the allowlist and to acknowledge the individual contribution of each peer. In the following figure, you can see the activity of each volunteer. 
Over the experiment, the volunteers logged in 600 different sessions. Participants regularly launched multiple runs in parallel, and many of them spread out the runs they launched over time. The runs of individual participants lasted 4 hours on average, and the maximum length was 21 hours. You can read more about the participation statistics in the paper. <iframe width="100%" height="670" frameborder="0" src="https://observablehq.com/embed/@huggingface/sahajbert-bubbles-chart-optimized?cells=c_noaws%2Ct_noaws%2Cviewof+currentDate"></iframe> <div style="line-height:105%;font-size:80%"> <p align="center"> Chart showing participants of the <a href="https://huggingface.co/neuropark/sahajBERT"> sahajBERT</a> experiment. Circle radius is relative to the total number of processed batches, the circle is greyed if the participant is not active. Every purple square represents an active device, darker color corresponds to higher performance </p> </div> Along with the resources provided by participants, we also used 16 preemptible (cheap but frequently interrupted) single-GPU T4 cloud instances to ensure the stability of the run. The cumulative runtime for the experiment was 234 days, and in the figure below you can see parts of the loss curve that each peer contributed to! <p align="center"> <iframe width="80%" height="950" frameborder="0" src="https://observablehq.com/embed/@huggingface/explore-collaborative-training-data-optimized?cells=sessions%2Cviewof+participant%2ClossByParticipant"></iframe> </p> The final model was uploaded to the Model Hub, so you can download and play with it if you want to: [https://hf.co/neuropark/sahajBERT](https://huggingface.co/neuropark/sahajBERT) ### Evaluation To evaluate the performance of sahajBERT, we finetuned it on two downstream tasks in Bengali: - Named entity recognition (NER) on the Bengali split of [WikiANN](https://aclanthology.org/P17-1178/). The goal of this task is to classify each token in the input text into one of the following categories: person, organization, location, or none of them. - News Category Classification (NCC) on the Soham articles dataset from [IndicGLUE](https://aclanthology.org/2020.findings-emnlp.445/). The goal of this task is to predict the category to which belong the input text. We evaluated it during training on the NER task to check that everything was going well; as you can see on the following plot, this was indeed the case! <iframe width="100%" height="476" frameborder="0" src="https://observablehq.com/embed/@huggingface/bengali-exp-eval?cells=evalPlot"></iframe> <div style="line-height:105%;font-size:80%"> <p align="center"> Evaluation metrics of fine-tuned models on the NER task from different checkpoints of pre-trained models. </p> </div> At the end of training, we compared sahajBERT with three other pretrained language models: [XLM-R Large](https://arxiv.org/abs/1911.02116), [IndicBert](https://aclanthology.org/2020.findings-emnlp.445/), and [bnRoBERTa](https://huggingface.co/neuralspace-reverie/indic-transformers-bn-roberta). In the table below, you can see that our model has results comparable to the best Bengali language models available on HF Hub, even though our model has only ~18M trained parameters, while, for instance, XLM-R (a strong multilingual baseline), has ~559M parameters and was trained on several hundred V100 GPUs. 
| Model | NER F1 (mean ± std) | NCC Accuracy (mean ± std) | |:-------------:|:-------------:|:-------------:| |[sahajBERT](https://huggingface.co/neuropark/sahajBERT) | 95.45 ± 0.53| 91.97 ± 0.47| |[XLM-R-large](https://huggingface.co/xlm-roberta-large) | 96.48 ± 0.22| 90.05 ± 0.38| |[IndicBert](https://huggingface.co/ai4bharat/indic-bert) | 92.52 ± 0.45| 74.46 ± 1.91| |[bnRoBERTa](https://huggingface.co/neuralspace-reverie/indic-transformers-bn-roberta) |82.32 ± 0.67|80.94 ± 0.45| These models are available on the Hub as well. You can test them directly by playing with the Hosted Inference API widget on their Model Cards or by loading them directly in your Python code. #### sahajBERT-NER Model card: [https://hf.co/neuropark/sahajBERT-NER](https://hf.co/neuropark/sahajBERT-NER) ```python from transformers import ( AlbertForTokenClassification, TokenClassificationPipeline, PreTrainedTokenizerFast, ) # Initialize tokenizer tokenizer = PreTrainedTokenizerFast.from_pretrained("neuropark/sahajBERT-NER") # Initialize model model = AlbertForTokenClassification.from_pretrained("neuropark/sahajBERT-NER") # Initialize pipeline pipeline = TokenClassificationPipeline(tokenizer=tokenizer, model=model) raw_text = "এই ইউনিয়নে ৩ টি মৌজা ও ১০ টি গ্রাম আছে ।" # Change me output = pipeline(raw_text) ``` #### sahajBERT-NCC Model card: [https://hf.co/neuropark/sahajBERT-NER](https://hf.co/neuropark/sahajBERT-NCC) ```python from transformers import ( AlbertForSequenceClassification, TextClassificationPipeline, PreTrainedTokenizerFast, ) # Initialize tokenizer tokenizer = PreTrainedTokenizerFast.from_pretrained("neuropark/sahajBERT-NCC") # Initialize model model = AlbertForSequenceClassification.from_pretrained("neuropark/sahajBERT-NCC") # Initialize pipeline pipeline = TextClassificationPipeline(tokenizer=tokenizer, model=model) raw_text = "এই ইউনিয়নে ৩ টি মৌজা ও ১০ টি গ্রাম আছে ।" # Change me output = pipeline(raw_text) ``` ## Conclusion In this blog post, we have discussed the method that can enable collaborative pretraining of neural networks with sahajBERT as the first truly successful example of applying it to a real-world problem. What does this all mean for the broader ML community? First, it is now possible to run large-scale distributed pretraining with your friends, and we hope to see a lot of cool new models that were previously less feasible to obtain. Also, our result might be important for multilingual NLP, since now the community for any language can train their own models without the need for significant computational resources concentrated in one place. ## Acknowledgements The DeDLOC paper and sahajBERT training experiment were created by Michael Diskin, Alexey Bukhtiyarov, Max Ryabinin, Lucile Saulnier, Quentin Lhoest, Anton Sinitsin, Dmitry Popov, Dmitry Pyrkin, Maxim Kashirin, Alexander Borzunov, Albert Villanova del Moral, Denis Mazur, Ilia Kobelev, Yacine Jernite, Thomas Wolf, and Gennady Pekhimenko. This project is the result of a collaboration between [Hugging Face](https://huggingface.co/), [Yandex Research](https://research.yandex.com/), [HSE University](https://www.hse.ru/en/), [MIPT](https://mipt.ru/english/), [University of Toronto](https://www.utoronto.ca/) and [Vector Institute](https://vectorinstitute.ai/). In addition, we would like to thank Stas Bekman, Dmitry Abulkhanov, Roman Zhytar, Alexander Ploshkin, Vsevolod Plokhotnyuk and Roman Kail for their invaluable help with building the training infrastructure. 
Also, we thank Abhishek Thakur for helping with downstream evaluation and Tanmoy Sarkar with Omar Sanseviero, who helped us organize the collaborative experiment and gave regular status updates to the participants over the course of the training run. Below, you can see all participants of the collaborative experiment: <iframe width="100%" height="380" frameborder="0" src="https://observablehq.com/embed/89470ece1dda817b?cells=humanParticipants"></iframe> ## References "Distributed Deep Learning in Open Collaborations", [ArXiv](https://arxiv.org/abs/2106.10207) Code for [sahajBERT experiments](https://github.com/yandex-research/DeDLOC/tree/main/sahajbert) in the DeDLOC repository.
blog/collaborative-training.md/0
{ "file_path": "blog/collaborative-training.md", "repo_id": "blog", "token_count": 6446 }
28
---
title: "Large-scale Near-deduplication Behind BigCode"
thumbnail: /blog/assets/dedup/thumbnail.png
authors:
- user: chenghao
---

# Large-scale Near-deduplication Behind BigCode

## Intended Audience

People who are interested in document-level near-deduplication at a large scale and have some understanding of hashing, graphs and text processing.

## Motivations

It is important to take care of our data before feeding it to the model, at least a Large Language Model in our case — as the old saying goes, garbage in, garbage out. This remains true even though it's increasingly difficult to keep in mind, with headline-grabbing models (or should we say APIs) creating an illusion that data quality matters less.

One of the problems we face in both BigScience and BigCode for data quality is duplication, including possible benchmark contamination. It has been shown that models tend to output training data verbatim when there are many duplicates[[1]](#1) (though it is less clear in some other domains[[2]](#2)), and duplication also makes the model vulnerable to privacy attacks[[1]](#1). Additional typical advantages of deduplication include:

1. Efficient training: you can achieve the same, and sometimes better, performance with fewer training steps[[3]](#3) [[4]](#4).
2. Preventing possible data leakage and benchmark contamination: non-zero duplicates discredit your evaluations and potentially make so-called improvements a false claim.
3. Accessibility. Most of us cannot afford to download or transfer thousands of gigabytes of text repeatedly, not to mention train a model with it. For a fixed-size dataset, deduplication makes it easier to study, transfer and collaborate with.

## From BigScience to BigCode

Allow me to first share a story on how I jumped on this near-deduplication quest, how the results progressed, and what lessons I learned along the way.

It all started with a conversation on LinkedIn when [BigScience](https://bigscience.huggingface.co/) had already been running for a couple of months. Huu Nguyen approached me when he noticed my pet project on GitHub, asking if I was interested in working on deduplication for BigScience. Of course, my answer was yes, completely ignorant of just how much effort would be required due to the sheer amount of data.

It was fun and challenging at the same time. It was challenging in the sense that I didn't really have much research experience with data at that scale, yet everyone was welcoming and trusted you with thousands of dollars of cloud compute budget. Yes, I had to wake up from my sleep several times to double-check that I had turned off those machines. As a result, I had to learn on the job through trial and error, which in the end opened me up to a new perspective that I don't think I would ever have gotten if it weren't for BigScience.

Moving forward, one year later, I am putting what I have learned back into [BigCode](https://www.bigcode-project.org/), working on even bigger datasets. In addition to LLMs trained for English[[3]](#3), we have confirmed that deduplication improves code models too[[4]](#4), while using a much smaller dataset. And now, I am sharing what I have learned with you, my dear reader, and hopefully you can also get a sense of what is happening behind the scenes of BigCode through the lens of deduplication.
In case you are interested, here is an updated version of the deduplication comparison that we started in BigScience: | Dataset | Input Size | Output Size or Deduction | Level | Method | Parameters | Language | Time | | ------------------------------------ | -------------------------------- | --------------------------------------------------------------- | --------------------- | --------------------------------------------- | ---------------------------------------------------------------- | ------------ | ------------------- | | OpenWebText2[[5]](#5) | After URL dedup: 193.89 GB (69M) | After MinHashLSH: 65.86 GB (17M) | URL + Document | URL(Exact) + Document(MinHash LSH) | \\( (10, 0.5, ?, ?, ?) \\) | English | | | Pile-CC[[5]](#5) | _~306 GB_ | _227.12 GiB (~55M)_ | Document | Document(MinHash LSH) | \\( (10, 0.5, ?, ?, ?) \\) | English | "several days" | | BNE5[[6]](#6) | 2TB | 570 GB | Document | Onion | 5-gram | Spanish | | | MassiveText[[7]](#7) | | 0.001 TB ~ 2.1 TB | Document | Document(Exact + MinHash LSH) | \\( (?, 0.8, 13, ?, ?) \\) | English | | | CC100-XL[[8]](#8) | | 0.01 GiB ~ 3324.45 GiB | URL + Paragraph | URL(Exact) + Paragraph(Exact) | SHA-1 | Multilingual | | | C4[[3]](#3) | 806.92 GB (364M) | 3.04% ~ 7.18% **↓** (train) | Substring or Document | Substring(Suffix Array) or Document(MinHash) | Suffix Array: 50-token, MinHash: \\( (9000, 0.8, 5, 20, 450) \\) | English | | | Real News[[3]](#3) | ~120 GiB | 13.63% ~ 19.4% **↓** (train) | Same as **C4** | Same as **C4** | Same as **C4** | English | | | LM1B[[3]](#3) | ~4.40 GiB (30M) | 0.76% ~ 4.86% **↓** (train) | Same as **C4** | Same as **C4** | Same as **C4** | English | | | WIKI40B[[3]](#3) | ~2.9M | 0.39% ~ 2.76% **↓** (train) | Same as **C4** | Same as **C4** | Same as **C4** | English | | | The BigScience ROOTS Corpus[[9]](#9) | | 0.07% ~ 2.7% **↓** (document) + 10.61%~32.30% **↓** (substring) | Document + Substring | Document (SimHash) + Substring (Suffix Array) | SimHash: 6-grams, hamming distance of 4, Suffix Array: 50-token | Multilingual | 12 hours ~ few days | This is the one for code datasets we created for BigCode as well. Model names are used when the dataset name isn't available. | Model | Method | Parameters | Level | | --------------------- | -------------------- | -------------------------------------- | -------- | | InCoder[[10]](#10) | Exact | Alphanumeric tokens/md5 + Bloom filter | Document | | CodeGen[[11]](#11) | Exact | SHA256 | Document | | AlphaCode[[12]](#12) | Exact | ignore whiespaces | Document | | PolyCode[[13]](#13) | Exact | SHA256 | Document | | PaLM Coder[[14]](#14) | Levenshtein distance | | Document | | CodeParrot[[15]](#15) | MinHash + LSH | \\( (256, 0.8, 1) \\) | Document | | The Stack[[16]](#16) | MinHash + LSH | \\( (256, 0.7, 5) \\) | Document | MinHash + LSH parameters \\( (P, T, K, B, R) \\): 1. \\( P \\) number of permutations/hashes 2. \\( T \\) Jaccard similarity threshold 3. \\( K \\) n-gram/shingle size 4. \\( B \\) number of bands 5. \\( R \\) number of rows To get a sense of how those parameters might impact your results, here is a simple demo to illustrate the computation mathematically: [MinHash Math Demo](https://huggingface.co/spaces/bigcode/near-deduplication). ## MinHash Walkthrough In this section, we will cover each step of MinHash, the one used in BigCode, and potential scaling issues and solutions. 
We will demonstrate the workflow via one example of three documents in English: | doc_id | content | | ------ | ---------------------------------------- | | 0 | Deduplication is so much fun! | | 1 | Deduplication is so much fun and easy! | | 2 | I wish spider dog[[17]](#17) is a thing. | The typical workflow of MinHash is as follows: 1. Shingling (tokenization) and fingerprinting (MinHashing), where we map each document into a set of hashes. 2. Locality-sensitive hashing (LSH). This step is to reduce the number of comparisons by grouping documents with similar bands together. 3. Duplicate removal. This step is where we decide which duplicated documents to keep or remove. ### Shingles Like in most applications involving text, we need to begin with tokenization. N-grams, a.k.a. shingles, are often used. In our example, we will be using word-level tri-grams, without any punctuations. We will circle back to how the size of ngrams impacts the performance in a later section. | doc_id | shingles | | ------ | ------------------------------------------------------------------------------- | | 0 | {"Deduplication is so", "is so much", "so much fun"} | | 1 | {'so much fun', 'fun and easy', 'Deduplication is so', 'is so much'} | | 2 | {'dog is a', 'is a thing', 'wish spider dog', 'spider dog is', 'I wish spider'} | This operation has a time complexity of \\( \mathcal{O}(NM) \\) where \\( N \\) is the number of documents and \\( M \\) is the length of the document. In other words, it is linearly dependent on the size of the dataset. This step can be easily scaled by parallelization by multiprocessing or distributed computation. ### Fingerprint Computation In MinHash, each shingle will typically either be 1) hashed multiple times with different hash functions, or 2) permuted multiple times using one hash function. Here, we choose to permute each hash 5 times. More variants of MinHash can be found in [MinHash - Wikipedia](https://en.wikipedia.org/wiki/MinHash?useskin=vector). | shingle | permuted hashes | | ------------------- | ----------------------------------------------------------- | | Deduplication is so | [403996643, 2764117407, 3550129378, 3548765886, 2353686061] | | is so much | [3594692244, 3595617149, 1564558780, 2888962350, 432993166] | | so much fun | [1556191985, 840529008, 1008110251, 3095214118, 3194813501] | Taking the minimum value of each column within each document — the "Min" part of the "MinHash", we arrive at the final MinHash for this document: | doc_id | minhash | | ------ | ---------------------------------------------------------- | | 0 | [403996643, 840529008, 1008110251, 2888962350, 432993166] | | 1 | [403996643, 840529008, 1008110251, 1998729813, 432993166] | | 2 | [166417565, 213933364, 1129612544, 1419614622, 1370935710] | Technically, we don't have to use the minimum value of each column, but the minimum value is the most common choice. Other order statistics such as maximum, kth smallest, or kth largest can be used as well[[21]](#21). In implementation, you can easily vectorize these steps with `numpy` and expect to have a time complexity of \\( \mathcal{O}(NMK) \\) where \\( K \\) is your number of permutations. Code modified based on [Datasketch](https://github.com/ekzhu/datasketch). 
```python def embed_func( content: str, idx: int, *, num_perm: int, ngram_size: int, hashranges: List[Tuple[int, int]], permutations: np.ndarray, ) -> Dict[str, Any]: a, b = permutations masks: np.ndarray = np.full(shape=num_perm, dtype=np.uint64, fill_value=MAX_HASH) tokens: Set[str] = {" ".join(t) for t in ngrams(NON_ALPHA.split(content), ngram_size)} hashvalues: np.ndarray = np.array([sha1_hash(token.encode("utf-8")) for token in tokens], dtype=np.uint64) permuted_hashvalues = np.bitwise_and( ((hashvalues * np.tile(a, (len(hashvalues), 1)).T).T + b) % MERSENNE_PRIME, MAX_HASH ) hashvalues = np.vstack([permuted_hashvalues, masks]).min(axis=0) Hs = [bytes(hashvalues[start:end].byteswap().data) for start, end in hashranges] return {"__signatures__": Hs, "__id__": idx} ``` If you are familiar with [Datasketch](https://github.com/ekzhu/datasketch), you might ask, why do we bother to strip all the nice high-level functions the library provides? It is not because we want to avoid adding dependencies, but because we intend to squeeze as much CPU computation as possible during parallelization. Fusing few steps into one function call enables us to utilize our compute resources better. Since one document's calculation is not dependent on anything else. A good parallelization choice would be using the `map` function from the `datasets` library: ```python embedded = ds.map( function=embed_func, fn_kwargs={ "num_perm": args.num_perm, "hashranges": HASH_RANGES, "ngram_size": args.ngram, "permutations": PERMUTATIONS, }, input_columns=[args.column], remove_columns=ds.column_names, num_proc=os.cpu_count(), with_indices=True, desc="Fingerprinting...", ) ``` After the fingerprint calculation, one particular document is mapped to one array of integer values. To figure out what documents are similar to each other, we need to group them based on such fingerprints. Entering the stage, **Locality Sensitive Hashing (LSH)**. ### Locality Sensitive Hashing LSH breaks the fingerprint array into bands, each band containing the same number of rows. If there is any hash values left, it will be ignored. Let's use \\( b=2 \\) bands and \\( r=2 \\) rows to group those documents: | doc_id | minhash | bands | | ------ | ---------------------------------------------------------- | ------------------------------------------------------ | | 0 | [403996643, 840529008, 1008110251, 2888962350, 432993166] | [0:[403996643, 840529008], 1:[1008110251, 2888962350]] | | 1 | [403996643, 840529008, 1008110251, 1998729813, 432993166] | [0:[403996643, 840529008], 1:[1008110251, 1998729813]] | | 2 | [166417565, 213933364, 1129612544, 1419614622, 1370935710] | [0:[166417565, 213933364], 1:[1129612544, 1419614622]] | If two documents share the same hashes in a band at a particular location (band index), they will be clustered into the same bucket and will be considered as candidates. | band index | band value | doc_ids | | ---------- | ------------------------ | ------- | | 0 | [403996643, 840529008] | 0, 1 | | 1 | [1008110251, 2888962350] | 0 | | 1 | [1008110251, 1998729813] | 1 | | 0 | [166417565, 213933364] | 2 | | 1 | [1129612544, 1419614622] | 2 | For each row in the `doc_ids` column, we can generate candidate pairs by pairing every two of them. From the above table, we can generate one candidate pair: `(0, 1)`. ### Beyond Duplicate Pairs This is where many deduplication descriptions in papers or tutorials stop. We are still left with the question of what to do with them. Generally, we can proceed with two options: 1. 
Double-check their actual Jaccard similarities by computing their shingle overlap, since MinHash only gives an estimate. The Jaccard similarity of two sets is defined as the size of the intersection divided by the size of the union. This now becomes much more doable than computing all-pair similarities, because we can focus only on documents within a cluster. This is also what we initially did for BigCode, and it worked reasonably well.

2. Treat them as true positives. You probably already noticed the issue here: Jaccard similarity isn't transitive, meaning \\( A \\) being similar to \\( B \\) and \\( B \\) being similar to \\( C \\) does not imply that \\( A \\) and \\( C \\) are similar. However, our experiments on The Stack showed that treating all of them as duplicates improves the downstream model's performance the most, so we gradually moved towards this method instead, and it saves time as well. But to apply this to your own dataset, we still recommend going over your duplicates and making a data-driven decision.

From such pairs, whether they are validated or not, we can now construct a graph with those pairs as edges, and duplicates will be clustered into communities or connected components. In terms of implementation, unfortunately, this is where `datasets` couldn't help much, because now we need something like a `groupby` that clusters documents based on their _band offset_ and _band values_. Here are some options we have tried:

**Option 1: Iterate the dataset the old-fashioned way and collect edges. Then use a graph library to do community detection or connected component detection.**

This did not scale well in our tests, for several reasons. First, iterating the whole dataset is slow and memory-consuming at a large scale. Second, popular graph libraries like `graphtool` or `networkx` have a lot of overhead for graph creation.

**Option 2: Use popular Python frameworks such as `dask` to allow more efficient `groupby` operations.**

But then you still have the problems of slow iteration and slow graph creation.

**Option 3: Iterate the dataset, but use a union-find data structure to cluster documents.**

This adds negligible overhead to the iteration, and it works relatively well for medium-sized datasets.

```python
for table in tqdm(HASH_TABLES, dynamic_ncols=True, desc="Clustering..."):
    for cluster in table.values():
        if len(cluster) <= 1:
            continue
        idx = min(cluster)
        for x in cluster:
            uf.union(x, idx)
```

**Option 4: For large datasets, use Spark.**

We already know that the steps up to the LSH part can be parallelized, which is also achievable in Spark. In addition to that, Spark supports distributed `groupBy` out of the box, and it is also straightforward to implement algorithms like [[18]](#18) for connected component detection. If you are wondering why we didn't use Spark's implementation of MinHash, the answer is that all our experiments so far stemmed from [Datasketch](https://github.com/ekzhu/datasketch), which uses an entirely different implementation than Spark, and we wanted to make sure that we carry on the lessons and insights learned from it without going into another rabbit hole of ablation experiments.
```python edges = ( records.flatMap( lambda x: generate_hash_values( content=x[1], idx=x[0], num_perm=args.num_perm, ngram_size=args.ngram_size, hashranges=HASH_RANGES, permutations=PERMUTATIONS, ) ) .groupBy(lambda x: (x[0], x[1])) .flatMap(lambda x: generate_edges([i[2] for i in x[1]])) .distinct() .cache() ) ``` A simple connected component algorithm based on [[18]](#18) implemented in Spark. ```python a = edges while True: b = a.flatMap(large_star_map).groupByKey().flatMap(large_star_reduce).distinct().cache() a = b.map(small_star_map).groupByKey().flatMap(small_star_reduce).distinct().cache() changes = a.subtract(b).union(b.subtract(a)).collect() if len(changes) == 0: break results = a.collect() ``` Additionally, thanks to cloud providers, we can set up Spark clusters like a breeze with services like GCP DataProc. **In the end, we can comfortably run the program to deduplicate 1.4 TB of data in just under 4 hours with a budget of $15 an hour.** ## Quality Matters Scaling a ladder doesn't get us to the moon. That's why we need to make sure this is the right direction, and we are using it the right way. Early on, our parameters were largely inherited from the CodeParrot experiments, and our ablation experiment indicated that those settings did improve the model's downstream performance[[16]](#16). We then set to further explore this path and can confirm that[[4]](#4): 1. Near-deduplication improves the model's downstream performance with a much smaller dataset (6 TB VS. 3 TB) 2. We haven't figured out the limit yet, but a more aggressive deduplication (6 TB VS. 2.4 TB) can improve the performance even more: 1. Lower the similarity threshold 2. Increase the shingle size (unigram → 5-gram) 3. Ditch false positive checking because we can afford to lose a small percentage of false positives ![A violin chart showing unigram impact in different settings](https://huggingface.co/datasets/chenghao/dedup_blog_assets/resolve/main/data/violin_chart_1.png) ![A violin chart showing unigram impact in different settings](https://huggingface.co/datasets/chenghao/dedup_blog_assets/resolve/main/data/violin_chart_2.png) <center> Image: Two graphs showing the impact of similarity threshold and shingle size, the first one is using unigram and the second one 5-gram. The red dash line shows the similarity cutoff: any documents below would be considered as false positives — their similarities with other documents within a cluster are lower than the threshold. </center> These graphs can help us understand why it was necessary to double-check the false positives for CodeParrot and early version of the Stack: using unigram creates many false positives; They also demonstrate that by increasing the shingle size to 5-gram, the percentage of false positives decreases significantly. A smaller threshold is desired if we want to keep the deduplication aggressiveness. Additional experiments also showed that lowering the threshold removes more documents that have high similarity pairs, meaning an increased recall in the segment we actually would like to remove the most. ## Scaling ![Scaling results for dataset size and deduplication time](https://huggingface.co/datasets/chenghao/dedup_blog_assets/resolve/main/data/scale.png) <center>Image: Deduplication time versus raw dataset size. This is achieved with 15 worker c2d-standard-16 machines on GCP, and each costed around $0.7 per hour. 
</center> ![CPU usage screenshot for the cluster during processing JSON dataset](https://huggingface.co/datasets/chenghao/dedup_blog_assets/resolve/main/data/usage.png) <center>Image: CPU usage screenshot for the cluster during processing JSON dataset.</center> This isn't the most rigorous scaling proof you can find, but the deduplication time, given a fixed computation budget, looks practically linear to the physical size of the dataset. When you take a closer look at the cluster resource usage when processing JSON dataset, the largest subset in the Stack, you can see the MinHash + LSH (stage 2) dominated the total real computation time (stage 2 + 3), which from our previous analysis is \\( \mathcal{O}(NM) \\) — linear to the dataset physical volume. ## Proceed with Caution Deduplication doesn't exempt you from thorough data exploration and analysis. In addition, these deduplication discoveries hold true for the Stack, but it does not mean it is readily applicable to other datasets or languages. It is a good first step towards building a better dataset, and further investigations such as data quality filtering (e.g., vulnerability, toxicity, bias, generated templates, PII) are still much needed. We still encourage you to perform similar analysis on your datasets before training. For example, it might not be very helpful to do deduplication if you have tight time and compute budget: [@geiping_2022](http://arxiv.org/abs/2212.14034) mentions that substring deduplication didn't improve their model's downstream performance. Existing datasets might also require thorough examination before use, for example, [@gao_2020](http://arxiv.org/abs/2101.00027) states that they only made sure the Pile itself, along with its splits, are deduplicated, and they won't proactively deduplicating for any downstream benchmarks and leave that decision to readers. In terms of data leakage and benchmark contamination, there is still much to explore. We had to retrain our code models because HumanEval was published in one of the GitHub repos in Python. Early near-deduplication results also suggest that MBPP[[19]](#19), one of the most popular benchmarks for coding, shares a lot of similarity with many Leetcode problems (e.g., task 601 in MBPP is basically Leetcode 646, task 604 ≃ Leetcode 151.). And we all know GitHub is no short of those coding challenges and solutions. It will be even more difficult if someone with bad intentions upload all the benchmarks in the form of python scripts, or other less obvious ways, and pollute all your training data. ## Future Directions 1. Substring deduplication. Even though it showed some benefits for English[[3]](#3), it is not clear yet if this should be applied to code data as well; 2. Repetition: paragraphs that are repeated multiple times in one document. [@rae_2021](http://arxiv.org/abs/2112.11446) shared some interesting heuristics on how to detect and remove them. 3. Using model embeddings for semantic deduplication. It is another whole research question with scaling, cost, ablation experiments, and trade-off with near-deduplication. There are some intriguing takes on this[[20]](#20), but we still need more situated evidence to draw a conclusion (e.g, [@abbas_2023](http://arxiv.org/abs/2303.09540)'s only text deduplication reference is [@lee_2022a](http://arxiv.org/abs/2107.06499), whose main claim is deduplicating helps instead of trying to be SOTA). 4. Optimization. 
There is always room for optimization: better quality evaluation, scaling, downstream performance impact analysis, etc.
5. Then there is another way to look at things: at what point does near-deduplication start to hurt performance? To what extent is similarity needed for diversity rather than being treated as redundancy?

## Credits

The banner image contains emojis (hugging face, Santa, document, wizard, and wand) from Noto Emoji (Apache 2.0). This blog post is proudly written without any generative APIs.

Huge thanks to Huu Nguyen @Huu and Hugo Laurençon @HugoLaurencon for the collaboration in BigScience, and to everyone at BigCode for the help along the way! If you ever find any errors, feel free to contact me: mouchenghao at gmail dot com.

## Supporting Resources

- [Datasketch](https://github.com/ekzhu/datasketch) (MIT)
- [simhash-py](https://github.com/seomoz/simhash-py/tree/master/simhash) and [simhash-cpp](https://github.com/seomoz/simhash-cpp) (MIT)
- [Deduplicating Training Data Makes Language Models Better](https://github.com/google-research/deduplicate-text-datasets) (Apache 2.0)
- [Gaoya](https://github.com/serega/gaoya) (MIT)
- [BigScience](https://github.com/bigscience-workshop) (Apache 2.0)
- [BigCode](https://github.com/bigcode-project) (Apache 2.0)

## References

- <a id="1">[1]</a> : Nikhil Kandpal, Eric Wallace, Colin Raffel, [Deduplicating Training Data Mitigates Privacy Risks in Language Models](http://arxiv.org/abs/2202.06539), 2022
- <a id="2">[2]</a> : Gowthami Somepalli, et al., [Diffusion Art or Digital Forgery? Investigating Data Replication in Diffusion Models](http://arxiv.org/abs/2212.03860), 2022
- <a id="3">[3]</a> : Katherine Lee, Daphne Ippolito, et al., [Deduplicating Training Data Makes Language Models Better](http://arxiv.org/abs/2107.06499), 2022
- <a id="4">[4]</a> : Loubna Ben Allal, Raymond Li, et al., [SantaCoder: Don't reach for the stars!](http://arxiv.org/abs/2301.03988), 2023
- <a id="5">[5]</a> : Leo Gao, Stella Biderman, et al., [The Pile: An 800GB Dataset of Diverse Text for Language Modeling](http://arxiv.org/abs/2101.00027), 2020
- <a id="6">[6]</a> : Asier Gutiérrez-Fandiño, Jordi Armengol-Estapé, et al., [MarIA: Spanish Language Models](http://arxiv.org/abs/2107.07253), 2022
- <a id="7">[7]</a> : Jack W. Rae, Sebastian Borgeaud, et al., [Scaling Language Models: Methods, Analysis & Insights from Training Gopher](http://arxiv.org/abs/2112.11446), 2021
- <a id="8">[8]</a> : Xi Victoria Lin, Todor Mihaylov, et al., [Few-shot Learning with Multilingual Language Models](http://arxiv.org/abs/2112.10668), 2021
- <a id="9">[9]</a> : Hugo Laurençon, Lucile Saulnier, et al., [The BigScience ROOTS Corpus: A 1.6TB Composite Multilingual Dataset](https://openreview.net/forum?id=UoEw6KigkUn), 2022
- <a id="10">[10]</a> : Daniel Fried, Armen Aghajanyan, et al., [InCoder: A Generative Model for Code Infilling and Synthesis](http://arxiv.org/abs/2204.05999), 2022
- <a id="11">[11]</a> : Erik Nijkamp, Bo Pang, et al., [CodeGen: An Open Large Language Model for Code with Multi-Turn Program Synthesis](http://arxiv.org/abs/2203.13474), 2023
- <a id="12">[12]</a> : Yujia Li, David Choi, et al., [Competition-Level Code Generation with AlphaCode](http://arxiv.org/abs/2203.07814), 2022
- <a id="13">[13]</a> : Frank F.
Xu, Uri Alon, et al., [A Systematic Evaluation of Large Language Models of Code](http://arxiv.org/abs/2202.13169), 2022
- <a id="14">[14]</a> : Aakanksha Chowdhery, Sharan Narang, et al., [PaLM: Scaling Language Modeling with Pathways](http://arxiv.org/abs/2204.02311), 2022
- <a id="15">[15]</a> : Lewis Tunstall, Leandro von Werra, Thomas Wolf, [Natural Language Processing with Transformers, Revised Edition](https://www.oreilly.com/library/view/natural-language-processing/9781098136789/), 2022
- <a id="16">[16]</a> : Denis Kocetkov, Raymond Li, et al., [The Stack: 3 TB of permissively licensed source code](http://arxiv.org/abs/2211.15533), 2022
- <a id="17">[17]</a> : [Rocky | Project Hail Mary Wiki | Fandom](https://projecthailmary.fandom.com/wiki/Rocky)
- <a id="18">[18]</a> : Raimondas Kiveris, Silvio Lattanzi, et al., [Connected Components in MapReduce and Beyond](https://doi.org/10.1145/2670979.2670997), 2014
- <a id="19">[19]</a> : Jacob Austin, Augustus Odena, et al., [Program Synthesis with Large Language Models](http://arxiv.org/abs/2108.07732), 2021
- <a id="20">[20]</a> : Amro Abbas, Kushal Tirumala, et al., [SemDeDup: Data-efficient learning at web-scale through semantic deduplication](http://arxiv.org/abs/2303.09540), 2023
- <a id="21">[21]</a> : Edith Cohen, [MinHash Sketches: A Brief Survey](http://www.cohenwang.com/edith/Surveys/minhash.pdf), 2016
blog/dedup.md/0
{ "file_path": "blog/dedup.md", "repo_id": "blog", "token_count": 11090 }
29
---
title: "Happy 1st anniversary 🤗 Diffusers!"
thumbnail: /blog/assets/diffusers-turns-1/diffusers-turns-1.png
authors:
- user: stevhliu
- user: sayakpaul
- user: pcuenq
---

# Happy 1st anniversary 🤗 Diffusers!

🤗 Diffusers is happy to celebrate its first anniversary! It has been an exciting year, and we're proud and grateful for how far we've come thanks to our community and open-source contributors. Last year, text-to-image models like DALL-E 2, Imagen, and Stable Diffusion captured the world's attention with their ability to generate stunningly photorealistic images from text, sparking a massive surge of interest and development in generative AI. But access to these powerful models was limited.

At Hugging Face, our mission is to democratize good machine learning by collaborating and helping each other build an open and ethical AI future together. Our mission motivated us to create the 🤗 Diffusers library so *everyone* can experiment, research, or simply play with text-to-image models. That's why we designed the library as a modular toolbox, so you can customize a diffusion model's components or just start using it out-of-the-box.

As 🤗 Diffusers turns 1, here's an overview of some of the most notable features we've added to the library with the help of our community. We are proud and immensely grateful for being part of an engaged community that promotes accessible usage, pushes diffusion models beyond just text-to-image generation, and is an all-around inspiration.

**Table of Contents**

* [Striving for photorealism](#striving-for-photorealism)
* [Video pipelines](#video-pipelines)
* [Text-to-3D models](#text-to-3d-models)
* [Image editing pipelines](#image-editing-pipelines)
* [Faster diffusion models](#faster-diffusion-models)
* [Ethics and safety](#ethics-and-safety)
* [Support for LoRA](#support-for-lora)
* [Torch 2.0 optimizations](#torch-20-optimizations)
* [Community highlights](#community-highlights)
* [Building products with 🤗 Diffusers](#building-products-with-🤗-diffusers)
* [Looking forward](#looking-forward)

## Striving for photorealism

Generative AI models are known for creating photorealistic images, but if you look closely, you may notice certain things that don't look right, like generating extra fingers on a hand. This year, the DeepFloyd IF and Stability AI SDXL models made a splash by improving the quality of generated images to be even more photorealistic.

[DeepFloyd IF](https://stability.ai/blog/deepfloyd-if-text-to-image-model) - A modular diffusion model that includes different processes for generating an image (for example, an image is upscaled 3x to produce a higher resolution image). Unlike Stable Diffusion, the IF model works directly on the pixel level, and it uses a large language model to encode text.

[Stable Diffusion XL (SDXL)](https://stability.ai/blog/sdxl-09-stable-diffusion) - The latest Stable Diffusion model from Stability AI, with significantly more parameters than its predecessor Stable Diffusion 2. It generates hyper-realistic images, leveraging a base model for close adherence to the prompt, and a refiner model specialized in the fine details and high-frequency content.

Head over to the DeepFloyd IF [docs](https://huggingface.co/docs/diffusers/v0.18.2/en/api/pipelines/if#texttoimage-generation) and the SDXL [docs](https://huggingface.co/docs/diffusers/v0.18.2/en/api/pipelines/stable_diffusion/stable_diffusion_xl) today to learn how to start generating your own images!
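
If you would like a quick taste before heading to the docs, the snippet below is a minimal sketch of what SDXL text-to-image generation looks like (it assumes the publicly released SDXL base checkpoint and a CUDA GPU with enough memory for fp16 inference):

```python
import torch
from diffusers import DiffusionPipeline

# Load the SDXL base checkpoint in half precision (assumes a CUDA GPU with enough VRAM).
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.to("cuda")

prompt = "An astronaut riding a green horse on Mars, photorealistic"
image = pipe(prompt=prompt).images[0]
image.save("astronaut.png")
```

The refiner model mentioned above can optionally be chained after the base model for extra detail; the SDXL docs walk through that full workflow.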
## Video pipelines

Text-to-image pipelines are cool, but text-to-video is even cooler! We currently support two text-to-video pipelines, [VideoFusion](https://huggingface.co/docs/diffusers/main/en/api/pipelines/text_to_video) and [Text2Video-Zero](https://huggingface.co/docs/diffusers/main/en/api/pipelines/text_to_video_zero).

If you're already familiar with text-to-image pipelines, using a text-to-video pipeline is very similar:

```py
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()

prompt = "Darth Vader surfing a wave"
video_frames = pipe(prompt, num_frames=24).frames
video_path = export_to_video(video_frames)
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/darthvader_cerpense.gif" alt="Generated video of Darth Vader surfing."/>
</div>

We expect text-to-video to go through a revolution during 🤗 Diffusers' second year, and we are excited to see what the community builds on top of these to push the boundaries of video generation from language!

## Text-to-3D models

In addition to text-to-video, we also have text-to-3D generation now thanks to OpenAI's [Shap-E](https://hf.co/papers/2305.02463) model. Shap-E is trained by encoding a large dataset of 3D-text pairs, and a diffusion model is conditioned on the encoder's outputs. You can design 3D assets for video games, interior design, and architecture.

Try it out today with the [`ShapEPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/shap_e#diffusers.ShapEPipeline) and [`ShapEImg2ImgPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/shap_e#diffusers.ShapEImg2ImgPipeline).

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/cake_out.gif" alt="3D render of a birthday cupcake generated using SHAP-E."/>
</div>

## Image editing pipelines

Image editing is one of the most practical use cases in fashion, material design, and photography. With diffusion models, the possibilities of image editing continue to expand.

We have many [pipelines](https://huggingface.co/docs/diffusers/main/en/using-diffusers/controlling_generation) in 🤗 Diffusers to support image editing. There are pipelines that let you describe your desired edit as a prompt or remove concepts from an image, and even a pipeline that unifies multiple generation methods to create high-quality images like panoramas. With 🤗 Diffusers, you can experiment with the future of photo editing now!

## Faster diffusion models

Diffusion models are known to be time-intensive because of their iterative steps. With OpenAI's [Consistency Models](https://huggingface.co/papers/2303.01469), the image generation process is significantly faster. Generating a single 256x256 resolution image only takes 3/4 of a second on a modern CPU! You can try this out in 🤗 Diffusers with the [`ConsistencyModelPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/consistency_models).

On top of speedier diffusion models, we also offer many optimization techniques for faster inference like [PyTorch 2.0's `scaled_dot_product_attention()` (SDPA) and `torch.compile()`](https://pytorch.org/blog/accelerated-diffusers-pt-20), sliced attention, feed-forward chunking, VAE tiling, CPU and model offloading, and more.
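
To make that list more concrete, here is a rough sketch of how a few of these memory-saving techniques can be switched on for a typical pipeline (the checkpoint id is just an example, and which of these methods are available depends on the pipeline you load):

```python
import torch
from diffusers import DiffusionPipeline

# Example checkpoint; any Stable Diffusion-style pipeline works similarly.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)

pipe.enable_attention_slicing()    # sliced attention: compute attention in smaller chunks
pipe.enable_vae_tiling()           # VAE tiling: decode large images tile by tile
pipe.enable_model_cpu_offload()    # keep submodules on CPU and move them to GPU only when needed

image = pipe("a watercolor painting of a lighthouse at dawn").images[0]
```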
These optimizations save memory, which translates to faster generation, and allow you to run inference on consumer GPUs. When you distribute a model with 🤗 Diffusers, all of these optimizations are immediately supported!

In addition to that, we also support specific hardware and formats like ONNX, the `mps` PyTorch device for Apple Silicon computers, Core ML, and others.

To learn more about how we optimize inference with 🤗 Diffusers, check out the [docs](https://huggingface.co/docs/diffusers/optimization/opt_overview)!

## Ethics and safety

Generative models are cool, but they also have the ability to produce harmful and NSFW content. To help users interact with these models responsibly and ethically, we've added a [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) component that flags inappropriate content generated during inference. Model creators can choose to incorporate this component into their models if they want.

In addition, generative models can also be used to produce disinformation. Earlier this year, the [Balenciaga Pope](https://www.theverge.com/2023/3/27/23657927/ai-pope-image-fake-midjourney-computer-generated-aesthetic) went viral for how realistic the image was despite it being fake. This underscores the importance and need for a mechanism to distinguish between generated and human content. That's why we've added an invisible watermark for images generated by the SDXL model, which helps users be better informed.

The development of these features is guided by our [ethical charter](https://huggingface.co/docs/diffusers/main/en/conceptual/ethical_guidelines), which you can find in our documentation.

## Support for LoRA

Fine-tuning diffusion models is expensive and out of reach for most consumer GPUs. We added the Low-Rank Adaptation ([LoRA](https://huggingface.co/papers/2106.09685)) technique to close this gap. With LoRA, which is a method for parameter-efficient fine-tuning, you can fine-tune large diffusion models faster and consume less memory. The resulting model weights are also very lightweight compared to the original model, so you can easily share your custom models. If you want to learn more, [our documentation](https://huggingface.co/docs/diffusers/main/en/training/lora) shows how to perform fine-tuning and inference on Stable Diffusion with LoRA.

In addition to LoRA, we support other [training techniques](https://huggingface.co/docs/diffusers/main/en/training/overview) for personalized generation, including DreamBooth, textual inversion, custom diffusion, and more!

## Torch 2.0 optimizations

PyTorch 2.0 [introduced support](https://pytorch.org/get-started/pytorch-2.0/#pytorch-2x-faster-more-pythonic-and-as-dynamic-as-ever) for `torch.compile()` and `scaled_dot_product_attention()`, a more efficient implementation of the attention mechanism. 🤗 Diffusers [provides first-class support](https://huggingface.co/docs/diffusers/optimization/torch2.0) for these features, resulting in massive speedups in inference latency, which can sometimes be more than twice as fast!

In addition to visual content (images, videos, 3D assets, etc.), we also added support for audio! Check out [the documentation](https://huggingface.co/docs/diffusers/using-diffusers/audio) to learn more.

## Community highlights

One of the most gratifying experiences of the past year has been seeing how the community is incorporating 🤗 Diffusers into their projects.
From adapting Low-rank adaptation (LoRA) for faster training of text-to-image models to building a state-of-the-art inpainting tool, here are a few of our favorite projects:

<div class="mx-auto max-w-screen-xl py-8">
  <div class="mb-8 sm:break-inside-avoid">
    <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800">
      <p class="leading-relaxed text-gray-700">We built Core ML Stable Diffusion to make it easier for developers to add state-of-the-art generative AI capabilities in their iOS, iPadOS and macOS apps with the highest efficiency on Apple Silicon. We built on top of 🤗 Diffusers instead of from scratch as 🤗 Diffusers consistently stays on top of a rapidly evolving field and promotes much needed interoperability of new and old ideas.</p>
    </blockquote>
    <div class="flex items-center gap-4">
      <img src="https://avatars.githubusercontent.com/u/10639145?s=200&v=4" class="h-12 w-12 rounded-full object-cover" />
      <div class="text-sm">
        <p class="font-medium">Atila Orhon</p>
      </div>
    </div>
  </div>
  <div class="mb-8 sm:break-inside-avoid">
    <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800">
      <p class="leading-relaxed text-gray-700">🤗 Diffusers has been absolutely developer-friendly for me to dive right into stable diffusion models. Main differentiating factor clearly being that 🤗 Diffusers implementation is often not some code from research lab, that are mostly focused on high velocity driven. While research codes are often poorly written and difficult to understand (lack of typing, assertions, inconsistent design patterns and conventions), 🤗 Diffusers was a breeze to use for me to hack my ideas within couple of hours. Without it, I would have needed to invest significantly more amount of time to start hacking. Well-written documentations and examples are extremely helpful as well.</p>
    </blockquote>
    <div class="flex items-center gap-4">
      <img src="https://avatars.githubusercontent.com/u/35953539?s=48&v=4" class="h-12 w-12 rounded-full object-cover" />
      <div class="text-sm">
        <p class="font-medium">Simo</p>
      </div>
    </div>
  </div>
  <div class="mb-8 sm:break-inside-avoid">
    <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800">
      <p class="leading-relaxed text-gray-700">BentoML is the unified framework for building, shipping, and scaling production-ready AI applications incorporating traditional ML, pre-trained AI models, Generative and Large Language Models. All Hugging Face Diffuser models and pipelines can be seamlessly integrated into BentoML applications, enabling the running of models on the most suitable hardware and independent scaling based on usage.</p>
    </blockquote>
    <div class="flex items-center gap-4">
      <img src="https://avatars.githubusercontent.com/u/49176046?s=48&v=4" class="h-12 w-12 rounded-full object-cover" />
      <div class="text-sm">
        <p class="font-medium">BentoML</p>
      </div>
    </div>
  </div>
  <div class="mb-8 sm:break-inside-avoid">
    <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800">
      <p class="leading-relaxed text-gray-700">Invoke AI is an open-source Generative AI tool built to empower professional creatives, from game designers and photographers to architects and product designers.
Invoke recently launched their hosted offering at invoke.ai, allowing users to generate assets from any computer, powered by the latest research in open-source.</p> </blockquote> <div class="flex items-center gap-4"> <img src="https://avatars.githubusercontent.com/u/113954515?s=48&v=4" class="h-12 w-12 rounded-full object-cover" /> <div class="text-sm"> <p class="font-medium">InvokeAI</p> </div> </div> </div> <div class="mb-8 sm:break-inside-avoid"> <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800"> <p class="leading-relaxed text-gray-700">TaskMatrix connects Large Language Model and a series of Visual Models to enable sending and receiving images during chatting.</p> </blockquote> <div class="flex items-center gap-4"> <img src="https://avatars.githubusercontent.com/u/6154722?s=48&v=4" class="h-12 w-12 rounded-full object-cover" /> <div class="text-sm"> <p class="font-medium">Chenfei Wu</p> </div> </div> </div> <div class="mb-8 sm:break-inside-avoid"> <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800"> <p class="leading-relaxed text-gray-700">Lama Cleaner is a powerful image inpainting tool that uses Stable Diffusion technology to remove unwanted objects, defects, or people from your pictures. It can also erase and replace anything in your images with ease.</p> </blockquote> <div class="flex items-center gap-4"> <img src="https://github.com/Sanster/lama-cleaner/raw/main/assets/logo.png" class="h-12 w-12 rounded-full object-cover" /> <div class="text-sm"> <p class="font-medium">Qing</p> </div> </div> </div> <div class="mb-8 sm:break-inside-avoid"> <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800"> <p class="leading-relaxed text-gray-700">Grounded-SAM combines a powerful Zero-Shot detector Grounding-DINO and Segment-Anything-Model (SAM) to build a strong pipeline to detect and segment everything with text inputs. When combined with 🤗 Diffusers inpainting models, Grounded-SAM can do highly controllable image editing tasks, including replacing specific objects, inpainting the background, etc.</p> </blockquote> <div class="flex items-center gap-4"> <img src="https://avatars.githubusercontent.com/u/113572103?s=48&v=4" class="h-12 w-12 rounded-full object-cover" /> <div class="text-sm"> <p class="font-medium">Tianhe Ren</p> </div> </div> </div> <div class="mb-8 sm:break-inside-avoid"> <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800"> <p class="leading-relaxed text-gray-700">Stable-Dreamfusion leverages the convenient implementations of 2D diffusion models in 🤗 Diffusers to replicate recent text-to-3D and image-to-3D methods.</p> </blockquote> <div class="flex items-center gap-4"> <img src="https://avatars.githubusercontent.com/u/25863658?s=48&v=4" class="h-12 w-12 rounded-full object-cover" /> <div class="text-sm"> <p class="font-medium">kiui</p> </div> </div> </div> <div class="mb-8 sm:break-inside-avoid"> <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800"> <p class="leading-relaxed text-gray-700">MMagic (Multimodal Advanced, Generative, and Intelligent Creation) is an advanced and comprehensive Generative AI toolbox that provides state-of-the-art AI models (e.g., diffusion models powered by 🤗 Diffusers and GAN) to synthesize, edit and enhance images and videos. 
In MMagic, users can use rich components to customize their own models like playing with Legos and manage the training loop easily.</p> </blockquote> <div class="flex items-center gap-4"> <img src="https://avatars.githubusercontent.com/u/10245193?s=48&v=4" class="h-12 w-12 rounded-full object-cover" /> <div class="text-sm"> <p class="font-medium">mmagic</p> </div> </div> </div> <div class="mb-8 sm:break-inside-avoid"> <blockquote class="rounded-xl !mb-0 bg-gray-50 p-6 shadow dark:bg-gray-800"> <p class="leading-relaxed text-gray-700">Tune-A-Video, developed by Jay Zhangjie Wu and his team at Show Lab, is the first to fine-tune a pre-trained text-to-image diffusion model using a single text-video pair and enables changing video content while preserving motion.</p> </blockquote> <div class="flex items-center gap-4"> <img src="https://avatars.githubusercontent.com/u/101181824?s=48&v=4" class="h-12 w-12 rounded-full object-cover" /> <div class="text-sm"> <p class="font-medium">Jay Zhangjie Wu</p> </div> </div> </div> </div> We also collaborated with Google Cloud (who generously provided the compute) to provide technical guidance and mentorship to help the community train diffusion models with TPUs (check out a summary of the event [here](https://opensource.googleblog.com/2023/06/controlling-stable-diffusion-with-jax-diffusers-and-cloud-tpus.html)). There were many cool models such as this [demo](https://huggingface.co/spaces/mfidabel/controlnet-segment-anything) that combines ControlNet with Segment Anything. <div class="flex justify-center"> <img src="https://github.com/mfidabel/JAX_SPRINT_2023/blob/8632f0fde7388d7a4fc57225c96ef3b8411b3648/EX_1.gif?raw=true" alt="ControlNet and SegmentAnything demo of a hot air balloon in various styles"> </div> Finally, we were delighted to receive contributions to our codebase from over 300 contributors, which allowed us to collaborate together in the most open way possible. 
Here are just a few of the contributions from our community:

- [Model editing](https://github.com/huggingface/diffusers/pull/2721) by [@bahjat-kawar](https://github.com/bahjat-kawar), a pipeline for editing a model's implicit assumptions
- [LDM3D](https://github.com/huggingface/diffusers/pull/3668) by [@estelleafl](https://github.com/estelleafl), a diffusion model for 3D images
- [DPMSolver](https://github.com/huggingface/diffusers/pull/3314) by [@LuChengTHU](https://github.com/LuChengTHU), improvements that significantly speed up inference
- [Custom Diffusion](https://github.com/huggingface/diffusers/pull/3031) by [@nupurkmr9](https://github.com/nupurkmr9), a technique for generating personalized images with only a few images of a subject

Besides these, a heartfelt shoutout to the following contributors who helped us ship some of the most powerful features of Diffusers (in no particular order):

* [@takuma104](https://github.com/huggingface/diffusers/commits?author=takuma104)
* [@nipunjindal](https://github.com/huggingface/diffusers/commits?author=nipunjindal)
* [@isamu-isozaki](https://github.com/huggingface/diffusers/commits?author=isamu-isozaki)
* [@piEsposito](https://github.com/huggingface/diffusers/commits?author=piEsposito)
* [@Birch-san](https://github.com/huggingface/diffusers/commits?author=Birch-san)
* [@LuChengTHU](https://github.com/huggingface/diffusers/commits?author=LuChengTHU)
* [@duongna21](https://github.com/huggingface/diffusers/commits?author=duongna21)
* [@clarencechen](https://github.com/huggingface/diffusers/commits?author=clarencechen)
* [@dg845](https://github.com/huggingface/diffusers/commits?author=dg845)
* [@Abhinay1997](https://github.com/huggingface/diffusers/commits?author=Abhinay1997)
* [@camenduru](https://github.com/huggingface/diffusers/commits?author=camenduru)
* [@ayushtues](https://github.com/huggingface/diffusers/commits?author=ayushtues)

## Building products with 🤗 Diffusers

Over the last year, we also saw many companies choosing to build their products on top of 🤗 Diffusers. Here are a couple of products that have caught our attention:

- [PlaiDay](http://plailabs.com/): “PlaiDay is a Generative AI experience where people collaborate, create, and connect. Our platform unlocks the limitless creativity of the human mind, and provides a safe, fun social canvas for expression.”
- [Previs One](https://previs.framer.wiki/): “Previs One is a diffuser pipeline for cinematic storyboarding and previsualization — it understands film and television compositional rules just as a director would speak them.”
- [Zust.AI](https://zust.ai/): “We leverage Generative AI to create studio-quality product photos for brands and marketing agencies.”
- [Dashtoon](https://dashtoon.com/): “Dashtoon is building a platform to create and consume visual content. We have multiple pipelines that load multiple LORAs, multiple control-nets and even multiple models powered by diffusers. Diffusers has made the gap between a product engineer and a ML engineer super low allowing dashtoon to ship user value faster and better.”
- [Virtual Staging AI](https://www.virtualstagingai.app/): "Filling empty rooms with beautiful furniture using generative models.”
- [Hexo.AI](https://www.hexo.ai/): “Hexo AI helps brands get higher ROI on marketing spends through Personalized Marketing at Scale.
Hexo is building a proprietary campaign generation engine which ingests customer data and generates brand compliant personalized creatives.”

If you're building products on top of 🤗 Diffusers, we'd love to chat to understand how we can make the library better together! Feel free to reach out to patrick@hf.co or sayak@hf.co.

## Looking forward

As we celebrate our first anniversary, we're grateful to our community and open-source contributors who have helped us come so far in such a short time. We're happy to share that we'll be presenting a 🤗 Diffusers demo at ICCV 2023 this fall – if you're attending, do come and see us! We'll continue to develop and improve our library, making it easier for everyone to use. We're also excited to see what the community will create next with our tools and resources.

Thank you for being a part of our journey so far, and we look forward to continuing to democratize good machine learning together! 🥳

❤️ Diffusers team

---

**Acknowledgements**: Thank you to [Omar Sanseviero](https://huggingface.co/osanseviero), [Patrick von Platen](https://huggingface.co/patrickvonplaten), [Giada Pistilli](https://huggingface.co/giadap) for their reviews, and [Chunte Lee](https://huggingface.co/Chunte) for designing the thumbnail.
blog/diffusers-turns-1.md/0
{ "file_path": "blog/diffusers-turns-1.md", "repo_id": "blog", "token_count": 7388 }
30
---
title: "Ethics and Society Newsletter #3: Ethical Openness at Hugging Face"
thumbnail: /blog/assets/137_ethics_soc_3/ethics_3_thumbnail.png
authors:
- user: irenesolaiman
- user: giadap
- user: NimaBoscarino
- user: yjernite
- user: allendorf
- user: meg
- user: sasha
---

# Ethics and Society Newsletter #3: Ethical Openness at Hugging Face

## Mission: Open and Good ML

In our mission to democratize good machine learning (ML), we examine how supporting ML community work also empowers examining and preventing possible harms. Open development and science decentralize power so that many people can collectively work on AI that reflects their needs and values. While [openness enables broader perspectives to contribute to research and AI overall, it faces the tension of less risk control](https://arxiv.org/abs/2302.04844).

Moderating ML artifacts presents unique challenges due to the dynamic and rapidly evolving nature of these systems. In fact, as ML models become more advanced and capable of producing increasingly diverse content, the potential for harmful or unintended outputs grows, necessitating the development of robust moderation and evaluation strategies. Moreover, the complexity of ML models and the vast amounts of data they process exacerbate the challenge of identifying and addressing potential biases and ethical concerns.

As hosts, we recognize the responsibility that comes with potentially amplifying harm to our users and the world more broadly. Often these harms disparately impact minority communities in a context-dependent manner. We have taken the approach of analyzing the tensions in play for each context, open to discussion across the company and Hugging Face community. While many models can amplify harm, especially discriminatory content, we are taking a series of steps to identify the highest-risk models and what action to take. Importantly, active perspectives from many backgrounds are key to understanding, measuring, and mitigating potential harms that affect different groups of people.

We are crafting tools and safeguards in addition to improving our documentation practices to ensure open source science empowers individuals and continues to minimize potential harms.

## Ethical Categories

The first major aspect of our work to foster good open ML consists in promoting the tools and positive examples of ML development that prioritize values and consideration for its stakeholders. This helps users take concrete steps to address outstanding issues, and present plausible alternatives to de facto damaging practices in ML development.

To help our users discover and engage with ethics-related ML work, we have compiled a set of tags. These 6 high-level categories are based on our analysis of Spaces that community members had contributed. They are designed to give you a jargon-free way of thinking about ethical technology:

- Rigorous work pays special attention to developing with best practices in mind. In ML, this can mean examining failure cases (including conducting bias and fairness audits), protecting privacy through security measures, and ensuring that potential users (technical and non-technical) are informed about the project's limitations.
- Consentful work [supports](https://www.consentfultech.io/) the self-determination of people who use and are affected by these technologies.
- Socially Conscious work shows us how technology can support social, environmental, and scientific efforts.
- Sustainable work highlights and explores techniques for making machine learning ecologically sustainable.
- Inclusive work broadens the scope of who builds and benefits in the machine learning world.
- Inquisitive work shines a light on inequities and power structures which challenge the community to rethink its relationship to technology.

Read more at https://huggingface.co/ethics

Look for these terms as we'll be using these tags, and updating them based on community contributions, across some new projects on the Hub!

## Safeguards

Taking an "all-or-nothing" view of open releases ignores the wide variety of contexts that determine an ML artifact's positive or negative impacts. Having more levers of control over how ML systems are shared and re-used supports collaborative development and analysis with less risk of promoting harmful uses or misuses, allowing for more openness and participation in innovation for shared benefits.

We engage directly with contributors and have addressed pressing issues. To bring this to the next level, we are building community-based processes. This approach empowers both Hugging Face contributors, and those affected by contributions, to inform the limitations, sharing, and additional mechanisms necessary for models and data made available on our platform. The three main aspects we will pay attention to are: the origin of the artifact, how the artifact is handled by its developers, and how the artifact has been used. In that respect we:

- launched a [flagging feature](https://twitter.com/GiadaPistilli/status/1571865167092396033) for our community to determine whether ML artifacts or community content (model, dataset, space, or discussion) violate our [content guidelines](https://huggingface.co/content-guidelines),
- monitor our community discussion boards to ensure Hub users abide by the [code of conduct](https://huggingface.co/code-of-conduct),
- robustly document our most-downloaded models with model cards that detail social impacts, biases, and intended and out-of-scope use cases,
- create audience-guiding tags, such as the "Not For All Audiences" tag that can be added to the repository's card metadata to avoid unrequested violent and sexual content,
- promote use of [Open Responsible AI Licenses (RAIL)](https://huggingface.co/blog/open_rail) for [models](https://www.licenses.ai/blog/2022/8/26/bigscience-open-rail-m-license), such as with LLMs ([BLOOM](https://huggingface.co/spaces/bigscience/license), [BigCode](https://huggingface.co/spaces/bigcode/license)),
- conduct research that [analyzes](https://arxiv.org/abs/2302.04844) which models and datasets have the highest potential for, or track record of, misuse and malicious use.

**How to use the flagging function:**

Click on the flag icon on any Model, Dataset, Space, or Discussion:

<p align="center">
  <br>
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/ethics_soc_3/flag2.jpg" alt="screenshot pointing to the flag icon to Report this model" />
  <em> While logged in, you can click on the "three dots" button to bring up the ability to report (or flag) a repository. This will open a conversation in the repository's community tab.
</em>
</p>

Share why you flagged this item:

<p align="center">
  <br>
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/ethics_soc_3/flag1.jpg" alt="screenshot showing the text window where you describe why you flagged this item" />
  <em> Please add as much relevant context as possible in your report! This will make it much easier for the repo owner and HF team to start taking action. </em>
</p>

In prioritizing open science, we examine potential harm on a case-by-case basis and provide an opportunity for collaborative learning and shared responsibility. When users flag a system, developers can directly and transparently respond to concerns. In this spirit, we ask that repository owners make reasonable efforts to address reports, especially when reporters take the time to provide a description of the issue. We also stress that the reports and discussions are subject to the same communication norms as the rest of the platform. Moderators are able to disengage from or close discussions should behavior become hateful and/or abusive (see [code of conduct](https://huggingface.co/code-of-conduct)).

Should a specific model be flagged as high risk by our community, we consider:

- Downgrading the ML artifact's visibility across the Hub in the trending tab and in feeds,
- Requesting that the gating feature be enabled to manage access to ML artifacts (see documentation for [models](https://huggingface.co/docs/hub/models-gated) and [datasets](https://huggingface.co/docs/hub/datasets-gated)),
- Requesting that the models be made private,
- Disabling access.

**How to add the "Not For All Audiences" tag:**

Edit the model/data card → add `not-for-all-audiences` in the tags section → open the PR and wait for the authors to merge it. Once merged, the following tag will be displayed on the repository:

<p align="center">
  <br>
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/ethics_soc_3/nfaa_tag.png" alt="screenshot showing where to add tags" />
</p>

Any repository tagged `not-for-all-audiences` will display the following popup when visited:

<p align="center">
  <br>
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/ethics_soc_3/nfaa2.png" alt="screenshot showing where to add tags" />
</p>

Clicking "View Content" will allow you to view the repository as normal. If you wish to always view `not-for-all-audiences`-tagged repositories without the popup, this setting can be changed in a user's [Content Preferences](https://huggingface.co/settings/content-preferences).

<p align="center">
  <br>
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/ethics_soc_3/nfaa1.png" alt="screenshot showing where to add tags" />
</p>

Open science requires safeguards, and one of our goals is to create an environment informed by tradeoffs with different values. Hosting and providing access to models in addition to cultivating community and discussion empowers diverse groups to assess social implications and guide what is good machine learning.

## Are you working on safeguards? Share them on Hugging Face Hub!

The most important part of Hugging Face is our community. If you're a researcher working on making ML safer to use, especially for open science, we want to support and showcase your work!
Here are some recent demos and tools from researchers in the Hugging Face community:

- [A Watermark for LLMs](https://huggingface.co/spaces/tomg-group-umd/lm-watermarking) by John Kirchenbauer, Jonas Geiping, Yuxin Wen, Jonathan Katz, Ian Miers, Tom Goldstein ([paper](https://arxiv.org/abs/2301.10226))
- [Generate Model Cards Tool](https://huggingface.co/spaces/huggingface/Model_Cards_Writing_Tool) by the Hugging Face team
- [Photoguard](https://huggingface.co/spaces/RamAnanth1/photoguard) to safeguard images against manipulation by Ram Ananth

Thanks for reading! 🤗

~ Irene, Nima, Giada, Yacine, and Elizabeth, on behalf of the Ethics and Society regulars

If you want to cite this blog post, please use the following (in descending order of contribution):

```
@misc{hf_ethics_soc_blog_3,
  author    = {Irene Solaiman and Giada Pistilli and Nima Boscarino and Yacine Jernite and Elizabeth Allendorf and Margaret Mitchell and Carlos Muñoz Ferrandis and Nathan Lambert and Alexandra Sasha Luccioni},
  title     = {Hugging Face Ethics and Society Newsletter 3: Ethical Openness at Hugging Face},
  booktitle = {Hugging Face Blog},
  year      = {2023},
  url       = {https://doi.org/10.57967/hf/0487},
  doi       = {10.57967/hf/0487}
}
```
blog/ethics-soc-3.md/0
{ "file_path": "blog/ethics-soc-3.md", "repo_id": "blog", "token_count": 2968 }
31
---
title: 'Few-shot learning in practice: GPT-Neo and the 🤗 Accelerated Inference API'
# thumbnail: /blog/assets/22_few_shot_learning_gpt_neo_and_inference_api/thumbnail.png
authors:
- user: philschmid
---

# Few-shot learning in practice: GPT-Neo and the 🤗 Accelerated Inference API

In many Machine Learning applications, the amount of available labeled data is a barrier to producing a high-performing model. The latest developments in NLP show that you can overcome this limitation by providing a few examples at inference time with a large language model - a technique known as Few-Shot Learning. In this blog post, we'll explain what Few-Shot Learning is, and explore how a large language model called GPT-Neo, and the 🤗 Accelerated Inference API, can be used to generate your own predictions.

## What is Few-Shot Learning?

Few-Shot Learning refers to the practice of feeding a machine learning model with a very small amount of training data to guide its predictions, like a few examples at inference time, as opposed to standard fine-tuning techniques which require a relatively large amount of training data for the pre-trained model to adapt to the desired task with accuracy.

This technique has been mostly used in computer vision, but with some of the latest Language Models, like [EleutherAI GPT-Neo](https://www.eleuther.ai/research/projects/gpt-neo/) and [OpenAI GPT-3](https://openai.com/blog/gpt-3-apps/), we can now use it in Natural Language Processing (NLP).

In NLP, Few-Shot Learning can be used with Large Language Models, which have learned to perform a wide number of tasks implicitly during their pre-training on large text datasets. This enables the model to generalize, that is to understand related but previously unseen tasks, with just a few examples.

Few-Shot NLP examples consist of three main components:

- **Task Description**: A short description of what the model should do, e.g. "Translate English to French"
- **Examples**: A few examples showing the model what it is expected to predict, e.g. "sea otter => loutre de mer"
- **Prompt**: The beginning of a new example, which the model should complete by generating the missing text, e.g. "cheese => "

![few-shot-prompt](assets/22_few_shot_learning_gpt_neo_and_inference_api/few-shot-prompt.png)
<small>Image from <a href="https://arxiv.org/abs/2005.14165" target="_blank">Language Models are Few-Shot Learners</a></small>

Creating these few-shot examples can be tricky, since you need to articulate the “task” you want the model to perform through them. A common issue is that models, especially smaller ones, are very sensitive to the way the examples are written.

An approach to optimize Few-Shot Learning in production is to learn a common representation for a task and then train task-specific classifiers on top of this representation.

OpenAI showed in the [GPT-3 Paper](https://arxiv.org/abs/2005.14165) that the few-shot prompting ability improves with the number of language model parameters.

![few-shot-performance](assets/22_few_shot_learning_gpt_neo_and_inference_api/few-shot-performance.png)
<small>Image from <a href="https://arxiv.org/abs/2005.14165" target="_blank">Language Models are Few-Shot Learners</a></small>

Let's now take a look at how GPT-Neo and the 🤗 Accelerated Inference API can be used to generate your own Few-Shot Learning predictions!

---

## What is GPT-Neo?

GPT-Neo is a family of transformer-based language models from [EleutherAI](https://www.eleuther.ai/projects/gpt-neo/) based on the GPT architecture.
[EleutherAI](https://www.eleuther.ai)'s primary goal is to train a model that is equivalent in size to GPT-3 and make it available to the public under an open license.

All of the currently available GPT-Neo checkpoints are trained with the Pile dataset, a large text corpus that is extensively documented in ([Gao et al., 2021](https://arxiv.org/abs/2101.00027)). As such, it is expected to function better on text that matches the distribution of its training text; we recommend keeping this in mind when designing your examples.

---

## 🤗 Accelerated Inference API

The [Accelerated Inference API](https://huggingface.co/inference-api) is our hosted service to run inference on any of the 10,000+ models publicly available on the 🤗 Model Hub, or your own private models, via simple API calls. The API includes acceleration on CPU and GPU with [up to 100x speedup](https://huggingface.co/blog/accelerated-inference) compared to out-of-the-box deployment of Transformers.

To integrate Few-Shot Learning predictions with `GPT-Neo` in your own apps, you can use the 🤗 Accelerated Inference API with the code snippet below. You can find your API Token [here](https://huggingface.co/settings/token); if you don't have an account, you can get started [here](https://huggingface.co/pricing).

```python
import json
import requests

API_TOKEN = ""

def query(payload='', parameters=None, options={'use_cache': False}):
    API_URL = "https://api-inference.huggingface.co/models/EleutherAI/gpt-neo-2.7B"
    headers = {"Authorization": f"Bearer {API_TOKEN}"}
    body = {"inputs": payload, 'parameters': parameters, 'options': options}
    response = requests.request("POST", API_URL, headers=headers, data=json.dumps(body))
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError:
        return "Error:" + " ".join(response.json()['error'])
    else:
        return response.json()[0]['generated_text']

parameters = {
    'max_new_tokens': 25,  # number of generated tokens
    'temperature': 0.5,    # controlling the randomness of generations
    'end_sequence': "###"  # stopping sequence for generation
}

# options passed to the API (defined explicitly so the call below runs)
options = {'use_cache': False}

prompt = "...."  # few-shot prompt

data = query(prompt, parameters, options)
```

---

## Practical Insights

Here are some practical insights which help you get started using `GPT-Neo` and the 🤗 Accelerated Inference API.

Since `GPT-Neo` (2.7B) is about 60x smaller than `GPT-3` (175B), it does not generalize as well to zero-shot problems and needs 3-4 examples to achieve good results. When you provide more examples, `GPT-Neo` understands the task and takes the `end_sequence` into account, which allows us to control the generated text pretty well.

![insights-benefit-of-examples](assets/22_few_shot_learning_gpt_neo_and_inference_api/insights-benefit-of-examples.png)

The hyperparameters `End Sequence`, `Token Length` & `Temperature` can be used to control the `text-generation` of the model, and you can use this to your advantage to solve the task you need. The `Temperature` controls the randomness of your generations: lower temperature results in less random generations and higher temperature results in more random generations.

![insights-benefit-of-hyperparameter](assets/22_few_shot_learning_gpt_neo_and_inference_api/insights-benefit-of-hyperparameter.png)

In the example, you can see how important it is to define your hyperparameters. These can make the difference between solving your task or failing miserably.

---

## Responsible Use

Few-Shot Learning is a powerful technique but also presents unique pitfalls that need to be taken into account when designing use cases.
To illustrate this, let's consider the default `Sentiment Analysis` setting provided in the widget. After seeing three examples of sentiment classification, the model makes the following predictions 4 times out of 5, with `temperature` set to 0.1:

> ###
> Tweet: "I'm a disabled happy person"
> Sentiment: Negative

What could go wrong? Imagine that you are using sentiment analysis to aggregate reviews of products on an online shopping website: a possible outcome could be that items useful to people with disabilities would be automatically down-ranked - a form of automated discrimination. For more on this specific issue, we recommend the ACL 2020 paper [Social Biases in NLP Models as Barriers for Persons with Disabilities](https://www.aclweb.org/anthology/2020.acl-main.487.pdf). Because Few-Shot Learning relies more directly on information and associations picked up from pre-training, it is more sensitive to this type of failure.

How to minimize the risk of harm? Here are some practical recommendations.

### Best practices for responsible use

- Make sure people know which parts of their user experience depend on the outputs of the ML system
- If possible, give users the ability to opt-out
- Provide a mechanism for users to give feedback on the model decision, and to override it
- Monitor feedback, especially model failures, for groups of users that may be disproportionately affected

What most needs to be avoided is using the model to automatically make decisions for, or about, a user, without opportunity for a human to provide input or correct the output. Several regulations, such as [GDPR](https://gdpr-info.eu/) in Europe, require that users be provided an explanation for automatic decisions made about them.

---

To use GPT-Neo or any Hugging Face model in your own application, you can [start a free trial](https://huggingface.co/pricing) of the 🤗 Accelerated Inference API. If you need help mitigating bias in models and AI systems, or leveraging Few-Shot Learning, the 🤗 Expert Acceleration Program can [offer your team direct premium support from the Hugging Face team](https://huggingface.co/support).
blog/few-shot-learning-gpt-neo-and-inference-api.md/0
{ "file_path": "blog/few-shot-learning-gpt-neo-and-inference-api.md", "repo_id": "blog", "token_count": 2536 }
32
---
title: "Welcome Gemma - Google’s new open LLM"
thumbnail: /blog/assets/gemma/thumbnail.jpg
authors:
- user: philschmid
- user: osanseviero
- user: pcuenq
---

# Welcome Gemma - Google’s new open LLM

Gemma, a new family of state-of-the-art open LLMs, was released today by Google! It's great to see Google reinforcing its commitment to open-source AI, and we're excited to fully support the launch with comprehensive integration in Hugging Face.

Gemma comes in two sizes: a 7B parameter model, for efficient deployment and development on consumer-size GPUs and TPUs, and a 2B version for CPU and on-device applications. Both come in base and instruction-tuned variants.

We've collaborated with Google to ensure the best integration into the Hugging Face ecosystem. You can find the 4 open-access models (2 base models & 2 fine-tuned ones) on the Hub. Among the features and integrations being released, we have:

- [Models on the Hub](https://huggingface.co/models?search=google/gemma), with their model cards and licenses
- [🤗 Transformers integration](https://github.com/huggingface/transformers/releases/tag/v4.38.0)
- Integration with Google Cloud
- Integration with Inference Endpoints
- An example of fine-tuning Gemma on a single GPU with 🤗 TRL

## Table of contents

- [What is Gemma?](#what-is-gemma)
- [Prompt format](#prompt-format)
- [Exploring the Unknowns](#exploring-the-unknowns)
- [Demo](#demo)
- [Using 🤗 Transformers](#using-🤗-transformers)
- [JAX Weights](#jax-weights)
- [Integration with Google Cloud](#integration-with-google-cloud)
- [Integration with Inference Endpoints](#integration-with-inference-endpoints)
- [Fine-tuning with 🤗 TRL](#fine-tuning-with-🤗-trl)
- [Additional Resources](#additional-resources)
- [Acknowledgments](#acknowledgments)

## What is Gemma?

Gemma is a family of 4 new LLM models by Google based on Gemini. It comes in two sizes: 2B and 7B parameters, each with base (pretrained) and instruction-tuned versions. All the variants can be run on various types of consumer hardware, even without quantization, and have a context length of 8K tokens:

- [gemma-7b](https://huggingface.co/google/gemma-7b): Base 7B model.
- [gemma-7b-it](https://huggingface.co/google/gemma-7b-it): Instruction fine-tuned version of the base 7B model.
- [gemma-2b](https://huggingface.co/google/gemma-2b): Base 2B model.
- [gemma-2b-it](https://huggingface.co/google/gemma-2b-it): Instruction fine-tuned version of the base 2B model.

A month after the original release, Google released a new version of the instruct models. This version has better coding capabilities, factuality, instruction following and multi-turn quality. The model is also less prone to begin its replies with "Sure,".

- [gemma-1.1-7b-it](https://huggingface.co/google/gemma-1.1-7b-it)
- [gemma-1.1-2b-it](https://huggingface.co/google/gemma-1.1-2b-it)

<div class="flex items-center justify-center">
  <img src="/blog/assets/gemma/Gemma-logo-small.png" alt="Gemma logo">
</div>

So, how good are the Gemma models? Here's an overview of the base models and their performance compared to other open models on the [LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) (higher scores are better):

| Model | License | Commercial use? | Pretraining size [tokens] | Leaderboard score ⬇️ |
| --- | --- | --- | --- | --- |
| [LLama 2 70B Chat (reference)](https://huggingface.co/meta-llama/Llama-2-70b-hf) | Llama 2 license | ✅ | 2T | 67.87 |
| [Gemma-7B](https://huggingface.co/google/gemma-7b) | Gemma license | ✅ | 6T | 63.75 |
| [DeciLM-7B](https://huggingface.co/Deci/DeciLM-7B) | Apache 2.0 | ✅ | unknown | 61.55 |
| [PHI-2 (2.7B)](https://huggingface.co/microsoft/phi-2) | MIT | ✅ | 1.4T | 61.33 |
| [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) | Apache 2.0 | ✅ | unknown | 60.97 |
| [Llama 2 7B](https://huggingface.co/meta-llama/Llama-2-7b-hf) | Llama 2 license | ✅ | 2T | 54.32 |
| [Gemma 2B](https://huggingface.co/google/gemma-2b) | Gemma license | ✅ | 2T | 46.51 |

Gemma 7B is a really strong model, with performance comparable to the best models in the 7B weight class, including Mistral 7B. Gemma 2B is an interesting model for its size, but it doesn't score as high on the leaderboard as the most capable models of a similar size, such as Phi 2. We are looking forward to receiving feedback from the community about real-world usage!

Recall that the LLM Leaderboard is especially useful for measuring the quality of pretrained models and not so much of the chat ones. We encourage running other benchmarks such as MT Bench, EQ Bench, and the lmsys Arena for the chat ones!

### Prompt format

The base models have no prompt format. Like other base models, they can be used to continue an input sequence with a plausible continuation or for zero-shot/few-shot inference. They are also a great foundation for fine-tuning on your own use cases. The Instruct versions have a very simple conversation structure:

```xml
<start_of_turn>user
knock knock<end_of_turn>
<start_of_turn>model
who is there<end_of_turn>
<start_of_turn>user
LaMDA<end_of_turn>
<start_of_turn>model
LaMDA who?<end_of_turn>
```

This format has to be exactly reproduced for effective use. We'll later show how easy it is to reproduce the instruct prompt with the chat template available in `transformers`.

### Exploring the Unknowns

The technical report includes information about the training and evaluation processes of the base models, but there are no extensive details on the dataset's composition and preprocessing. We know they were trained with data from various sources, mostly web documents, code, and mathematical texts. The data was filtered to remove CSAM content and PII, and licensing checks were applied.

Similarly, for the Gemma instruct models, no details have been shared about the fine-tuning datasets or the hyperparameters associated with SFT and [RLHF](https://huggingface.co/blog/rlhf).

## Demo

You can chat with the Gemma Instruct model on Hugging Chat!
Check out the link here: https://huggingface.co/chat/models/google/gemma-1.1-7b-it

### Using 🤗 Transformers

With Transformers [release 4.38](https://github.com/huggingface/transformers/releases/tag/v4.38.0), you can use Gemma and leverage all the tools within the Hugging Face ecosystem, such as:

- training and inference scripts and examples
- safe file format (`safetensors`)
- integrations with tools such as bitsandbytes (4-bit quantization), PEFT (parameter efficient fine-tuning), and Flash Attention 2
- utilities and helpers to run generation with the model
- mechanisms to export the models to deploy

In addition, Gemma models are compatible with `torch.compile()` with CUDA graphs, giving them a ~4x speedup at inference time!

To use Gemma models with transformers, make sure to use the latest `transformers` release:

```bash
pip install -U "transformers==4.38.1"
```

The following snippet shows how to use `gemma-7b-it` with transformers. It requires about 18 GB of RAM, which fits in consumer GPUs such as the 3090 or 4090.

```python
from transformers import AutoTokenizer
import transformers
import torch

model = "google/gemma-7b-it"

tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="cuda",
)

messages = [
    {"role": "user", "content": "Who are you? Please, answer in pirate-speak."},
]
prompt = pipeline.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipeline(
    prompt,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_k=50,
    top_p=0.95
)
print(outputs[0]["generated_text"][len(prompt):])
```

> `Avast me, me hearty. I am a pirate of the high seas, ready to pillage and plunder. Prepare for a tale of adventure and booty!`

We used `bfloat16` because that's the reference precision and how all evaluations were run. Running in `float16` may be faster on your hardware.

You can also automatically quantize the model, loading it in 8-bit or even 4-bit mode. 4-bit loading takes about 9 GB of memory to run, making it compatible with a lot of consumer cards and all the GPUs in Google Colab. This is how you'd load the generation pipeline in 4-bit:

```python
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    model_kwargs={
        "torch_dtype": torch.float16,
        "quantization_config": {"load_in_4bit": True}
    },
)
```

For more details on using the models with transformers, please check [the model cards](https://huggingface.co/gg-hf/gemma-7b).

### JAX Weights

All the Gemma model variants are available for use with PyTorch, as explained above, or JAX / Flax. To load Flax weights, you need to use the `flax` revision from the repo, as shown below:

```python
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxGemmaForCausalLM

model_id = "google/gemma-2b"

tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.padding_side = "left"

model, params = FlaxGemmaForCausalLM.from_pretrained(
    model_id,
    dtype=jnp.bfloat16,
    revision="flax",
    _do_init=False,
)

inputs = tokenizer("Valencia and Málaga are", return_tensors="np", padding=True)
output = model.generate(**inputs, params=params, max_new_tokens=20, do_sample=False)
output_text = tokenizer.batch_decode(output.sequences, skip_special_tokens=True)
```

> `['Valencia and Málaga are two of the most popular tourist destinations in Spain.
Both cities boast a rich history, vibrant culture,']` > Please, [check out this notebook](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/jax_gemma.ipynb) for a comprehensive hands-on walkthrough on how to parallelize JAX inference on Colab TPUs! ## Integration with Google Cloud You can deploy and train Gemma on Google Cloud through Vertex AI or Google Kubernetes Engine (GKE), using [Text Generation Inference](https://huggingface.co/docs/text-generation-inference/index) and Transformers. To deploy the Gemma model from Hugging Face, go to the [model page](https://huggingface.co/google/gemma-7b-it) and click on [Deploy -> Google Cloud.](https://huggingface.co/google/gemma-7b-it) This will bring you to the Google Cloud Console, where you can 1-click deploy Gemma on Vertex AI or GKE. Text Generation Inference powers Gemma on Google Cloud and is the first integration as part of our [partnership with Google Cloud.](https://huggingface.co/blog/gcp-partnership) ![deploy on GCP](/blog/assets/gemma/gcp-deploy.png) You can also access Gemma directly through the Vertex AI Model Garden. To Tune the Gemma model from Hugging Face, go to the [model page](https://huggingface.co/google/gemma-7b-it) and click on [Train -> Google Cloud.](https://huggingface.co/google/gemma-7b-it) This will bring you to the Google Cloud Console, where you can access notebooks to tune Gemma on Vertex AI or GKE. ![train on GCP](/blog/assets/gemma/gcp-train-gemma.png) These integrations mark the first offerings we are launching together as a [result of our collaborative partnership with Google.](https://huggingface.co/blog/gcp-partnership) Stay tuned for more! ## Integration with Inference Endpoints You can deploy Gemma on Hugging Face's [Inference Endpoints](https://ui.endpoints.huggingface.co/new?repository=google%2Fgemma-7b-it), which uses Text Generation Inference as the backend. [Text Generation Inference](https://github.com/huggingface/text-generation-inference) is a production-ready inference container developed by Hugging Face to enable easy deployment of large language models. It has features such as continuous batching, token streaming, tensor parallelism for fast inference on multiple GPUs, and production-ready logging and tracing. To deploy a Gemma model, go to the [model page](https://huggingface.co/google/gemma-7b-it) and click on the [Deploy -> Inference Endpoints](https://ui.endpoints.huggingface.co/new?repository=google/gemma-7b-it) widget. You can learn more about [Deploying LLMs with Hugging Face Inference Endpoints](https://huggingface.co/blog/inference-endpoints-llm) in a previous blog post. Inference Endpoints supports [Messages API](https://huggingface.co/blog/tgi-messages-api) through Text Generation Inference, which allows you to switch from another closed model to an open one by simply changing the URL. ```bash from openai import OpenAI # initialize the client but point it to TGI client = OpenAI( base_url="<ENDPOINT_URL>" + "/v1/", # replace with your endpoint url api_key="<HF_API_TOKEN>", # replace with your token ) chat_completion = client.chat.completions.create( model="tgi", messages=[ {"role": "user", "content": "Why is open-source software important?"}, ], stream=True, max_tokens=500 ) # iterate and print stream for message in chat_completion: print(message.choices[0].delta.content, end="") ``` ## Fine-tuning with 🤗 TRL Training LLMs can be technically and computationally challenging. 
In this section, we’ll look at the tools available in the Hugging Face ecosystem to efficiently train Gemma on consumer-size GPUs An example command to fine-tune Gemma on OpenAssistant’s [chat dataset](https://huggingface.co/datasets/OpenAssistant/oasst_top1_2023-08-25) can be found below. We use 4-bit quantization and [QLoRA](https://arxiv.org/abs/2305.14314) to conserve memory to target all the attention blocks' linear layers. First, install the nightly version of 🤗 TRL and clone the repo to access the [training script](https://github.com/huggingface/trl/blob/main/examples/scripts/sft.py): ```jsx pip install -U transformers trl peft bitsandbytes git clone https://github.com/huggingface/trl cd trl ``` Then you can run the script: ```jsx accelerate launch --config_file examples/accelerate_configs/multi_gpu.yaml --num_processes=1 \ examples/scripts/sft.py \ --model_name google/gemma-7b \ --dataset_name OpenAssistant/oasst_top1_2023-08-25 \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 1 \ --learning_rate 2e-4 \ --save_steps 20_000 \ --use_peft \ --lora_r 16 --lora_alpha 32 \ --lora_target_modules q_proj k_proj v_proj o_proj \ --load_in_4bit \ --output_dir gemma-finetuned-openassistant ``` This takes about 9 hours to train on a single A10G, but can be easily parallelized by tweaking `--num_processes` to the number of GPUs you have available. ## Additional Resources - [Models on the Hub](https://huggingface.co/models?other=gemma) - Open LLM [Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) - [Chat demo on Hugging Chat](https://huggingface.co/chat?model=google/gemma-7b-it) - [Official Gemma Blog](https://blog.google/technology/developers/gemma-open-models/) - [Gemma Product Page](https://ai.google.dev/gemma) - [Vertex AI model garden link](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335) - Google Notebook ## Acknowledgments Releasing such models with support and evaluations in the ecosystem would not be possible without the contributions of many community members, including [Clémentine](https://huggingface.co/clefourrier) and [Eleuther Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) for LLM evaluations; [Olivier](https://huggingface.co/olivierdehaene) and [David](https://huggingface.co/drbh) for Text Generation Inference Support; [Simon](https://huggingface.co/sbrandeis) for developing the new access control features on Hugging Face; [Arthur](https://huggingface.co/ArthurZ), [Younes](https://huggingface.co/ybelkada), and [Sanchit](https://huggingface.co/sanchit-gandhi) for integrating Gemma into transformers; [Morgan](https://huggingface.co/mfuntowicz) for integrating Gemma into optimum-nvidia (coming); [Nathan](https://huggingface.co/nsarrazin), [Victor](https://huggingface.co/victor), and [Mishig](https://huggingface.co/mishig) for making Gemma available in Hugging Chat. And Thank you to the Google Team for releasing Gemma and making it available to the open-source AI community!
blog/gemma.md/0
{ "file_path": "blog/gemma.md", "repo_id": "blog", "token_count": 5784 }
33
---
title: "Graph Classification with Transformers"
thumbnail: /blog/assets/125_intro-to-graphml/thumbnail_classification.png
---

# Graph classification with Transformers

<div class="blog-metadata">
    <small>Published April 14, 2023.</small>
    <a target="_blank" class="btn no-underline text-sm mb-5 font-sans" href="https://github.com/huggingface/blog/blob/main/graphml-classification.md">
        Update on GitHub
    </a>
</div>
<div class="author-card">
    <a href="/clefourrier">
        <img class="avatar avatar-user" src="https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/1644340617257-noauth.png?w=200&h=200&f=face" title="Gravatar">
        <div class="bfc">
            <code>clefourrier</code>
            <span class="fullname">Clémentine Fourrier</span>
        </div>
    </a>
</div>

In the previous [blog](https://huggingface.co/blog/intro-graphml), we explored some of the theoretical aspects of machine learning on graphs. This one will explore how you can do graph classification using the Transformers library. (You can also follow along by downloading the demo notebook [here](https://github.com/huggingface/blog/blob/main/notebooks/graphml-classification.ipynb)!)

At the moment, the only graph transformer model available in Transformers is Microsoft's [Graphormer](https://arxiv.org/abs/2106.05234), so this is the one we will use here. We are looking forward to seeing what other models people will use and integrate 🤗

## Requirements

To follow this tutorial, you need to have installed `datasets` and `transformers` (version >= 4.27.2), which you can do with `pip install -U datasets transformers`.

## Data

To use graph data, you can either start from your own datasets, or use [those available on the Hub](https://huggingface.co/datasets?task_categories=task_categories:graph-ml&sort=downloads). We'll focus on using already available ones, but feel free to [add your datasets](https://huggingface.co/docs/datasets/upload_dataset)!

### Loading

Loading a graph dataset from the Hub is very easy. Let's load the `ogbg-molhiv` dataset (a baseline from the [Open Graph Benchmark](https://ogb.stanford.edu/) by Stanford), stored in the `OGB` repository:

```python
from datasets import load_dataset

# The dataset comes with train, validation, and test splits
dataset = load_dataset("OGB/ogbg-molhiv")

dataset = dataset.shuffle(seed=0)
```

This dataset already has three splits, `train`, `validation`, and `test`, and all these splits contain our 5 columns of interest (`edge_index`, `edge_attr`, `y`, `num_nodes`, `node_feat`), which you can see by doing `print(dataset)`.

If you have other graph libraries, you can use them to plot your graphs and further inspect the dataset. For example, using networkx and matplotlib:

```python
import networkx as nx
import matplotlib.pyplot as plt

# We want to plot the first train graph
graph = dataset["train"][0]

edges = graph["edge_index"]
num_edges = len(edges[0])
num_nodes = graph["num_nodes"]

# Conversion to networkx format
G = nx.Graph()
G.add_nodes_from(range(num_nodes))
G.add_edges_from([(edges[0][i], edges[1][i]) for i in range(num_edges)])

# Plot
nx.draw(G)
```

### Format

On the Hub, graph datasets are mostly stored as lists of graphs (using the `jsonl` format).

A single graph is a dictionary, and here is the expected format for our graph classification datasets:

    - **Type**: list of 2 lists of integers.
- **Example**: a graph containing four nodes (0, 1, 2 and 3) and where connections are 1->2, 1->3 and 3->1 will have `edge_index = [[1, 1, 3], [2, 3, 1]]`. You might notice here that node 0 is not present here, as it is not part of an edge per se. This is why the next attribute is important. - `num_nodes` indicates the total number of nodes available in the graph (by default, it is assumed that nodes are numbered sequentially). - **Type**: integer - **Example**: In our above example, `num_nodes = 4`. - `y` maps each graph to what we want to predict from it (be it a class, a property value, or several binary label for different tasks). - **Type**: list of either integers (for multi-class classification), floats (for regression), or lists of ones and zeroes (for binary multi-task classification) - **Example**: We could predict the graph size (small = 0, medium = 1, big = 2). Here, `y = [0]`. - `node_feat` contains the available features (if present) for each node of the graph, ordered by node index. - **Type**: list of lists of integer (Optional) - **Example**: Our above nodes could have, for example, types (like different atoms in a molecule). This could give `node_feat = [[1], [0], [1], [1]]`. - `edge_attr` contains the available attributes (if present) for each edge of the graph, following the `edge_index` ordering. - **Type**: list of lists of integers (Optional) - **Example**: Our above edges could have, for example, types (like molecular bonds). This could give `edge_attr = [[0], [1], [1]]`. ### Preprocessing Graph transformer frameworks usually apply specific preprocessing to their datasets to generate added features and properties which help the underlying learning task (classification in our case). Here, we use Graphormer's default preprocessing, which generates in/out degree information, the shortest path between node matrices, and other properties of interest for the model. ```python from transformers.models.graphormer.collating_graphormer import preprocess_item, GraphormerDataCollator dataset_processed = dataset.map(preprocess_item, batched=False) ``` It is also possible to apply this preprocessing on the fly, in the DataCollator's parameters (by setting `on_the_fly_processing` to True): not all datasets are as small as `ogbg-molhiv`, and for large graphs, it might be too costly to store all the preprocessed data beforehand. ## Model ### Loading Here, we load an existing pretrained model/checkpoint and fine-tune it on our downstream task, which is a binary classification task (hence `num_classes = 2`). We could also fine-tune our model on regression tasks (`num_classes = 1`) or on multi-task classification. ```python from transformers import GraphormerForGraphClassification model = GraphormerForGraphClassification.from_pretrained( "clefourrier/pcqm4mv2_graphormer_base", num_classes=2, # num_classes for the downstream task ignore_mismatched_sizes=True, ) ``` Let's look at this in more detail. Calling the `from_pretrained` method on our model downloads and caches the weights for us. As the number of classes (for prediction) is dataset dependent, we pass the new `num_classes` as well as `ignore_mismatched_sizes` alongside the `model_checkpoint`. This makes sure a custom classification head is created, specific to our task, hence likely different from the original decoder head. It is also possible to create a new randomly initialized model to train from scratch, either following the known parameters of a given checkpoint or by manually choosing them. 
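For instance, a from-scratch model can be instantiated from a configuration object. The sketch below is only illustrative: the configuration values are assumptions meant to show the mechanism, not tuned recommendations.

```python
from transformers import GraphormerConfig, GraphormerForGraphClassification

# Illustrative values only; pick them to match your dataset and task
config = GraphormerConfig(
    num_classes=2,          # binary graph classification
    num_hidden_layers=12,
    embedding_dim=768,
    num_attention_heads=32,
)
model = GraphormerForGraphClassification(config)  # randomly initialized weights
```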
### Training or fine-tuning To train our model simply, we will use a `Trainer`. To instantiate it, we will need to define the training configuration and the evaluation metric. The most important is the `TrainingArguments`, which is a class that contains all the attributes to customize the training. It requires a folder name, which will be used to save the checkpoints of the model. ```python from transformers import TrainingArguments, Trainer training_args = TrainingArguments( "graph-classification", logging_dir="graph-classification", per_device_train_batch_size=64, per_device_eval_batch_size=64, auto_find_batch_size=True, # batch size can be changed automatically to prevent OOMs gradient_accumulation_steps=10, dataloader_num_workers=4, #1, num_train_epochs=20, evaluation_strategy="epoch", logging_strategy="epoch", push_to_hub=False, ) ``` For graph datasets, it is particularly important to play around with batch sizes and gradient accumulation steps to train on enough samples while avoiding out-of-memory errors. The last argument `push_to_hub` allows the Trainer to push the model to the Hub regularly during training, as each saving step. ```python trainer = Trainer( model=model, args=training_args, train_dataset=dataset_processed["train"], eval_dataset=dataset_processed["validation"], data_collator=GraphormerDataCollator(), ) ``` In the `Trainer` for graph classification, it is important to pass the specific data collator for the given graph dataset, which will convert individual graphs to batches for training. ```python train_results = trainer.train() trainer.push_to_hub() ``` When the model is trained, it can be saved to the hub with all the associated training artefacts using `push_to_hub`. As this model is quite big, it takes about a day to train/fine-tune for 20 epochs on CPU (IntelCore i7). To go faster, you could use powerful GPUs and parallelization instead, by launching the code either in a Colab notebook or directly on the cluster of your choice. ## Ending note Now that you know how to use `transformers` to train a graph classification model, we hope you will try to share your favorite graph transformer checkpoints, models, and datasets on the Hub for the rest of the community to use!
blog/graphml-classification.md/0
{ "file_path": "blog/graphml-classification.md", "repo_id": "blog", "token_count": 2760 }
34
--- title: "AMD + 🤗: Large Language Models Out-of-the-Box Acceleration with AMD GPU" thumbnail: /blog/assets/optimum_amd/amd_hf_logo_fixed.png authors: - user: fxmarty - user: IlyasMoutawwakil - user: mohitsha - user: echarlaix - user: seungrokj guest: true - user: mfuntowicz --- # AMD + 🤗: Large Language Models Out-of-the-Box Acceleration with AMD GPU Earlier this year, [AMD and Hugging Face announced a partnership](https://huggingface.co/blog/huggingface-and-amd) to accelerate AI models during the AMD's AI Day event. We have been hard at work to bring this vision to reality, and make it easy for the Hugging Face community to run the latest AI models on AMD hardware with the best possible performance. AMD is powering some of the most powerful supercomputers in the World, including the fastest European one, [LUMI](https://www.lumi-supercomputer.eu/lumi-retains-its-position-as-europes-fastest-supercomputer/), which operates over 10,000 MI250X AMD GPUs. At this event, AMD revealed their latest generation of server GPUs, the AMD [Instinct™ MI300](https://www.amd.com/fr/graphics/instinct-server-accelerators) series accelerators, which will soon become generally available. In this blog post, we provide an update on our progress towards providing great out-of-the-box support for AMD GPUs, and improving the interoperability for the latest server-grade AMD Instinct GPUs ## Out-of-the-box Acceleration Can you spot AMD-specific code changes below? Don't hurt your eyes, there's none compared to running on NVIDIA GPUs 🤗. ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch model_id = "01-ai/Yi-6B" tokenizer = AutoTokenizer.from_pretrained(model_id) with torch.device("cuda"): model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16) inp = tokenizer(["Today I am in Paris and"], padding=True, return_tensors="pt").to("cuda") res = model.generate(**inp, max_new_tokens=30) print(tokenizer.batch_decode(res)) ``` One of the major aspects we have been working on is the ability to run Hugging Face Transformers models without any code change. We now support all Transformers models and tasks on AMD Instinct GPUs. And our collaboration is not stopping here, as we explore out-of-the-box support for diffusers models, and other libraries as well as other AMD GPUs. Achieving this milestone has been a significant effort and collaboration between our teams and companies. To maintain support and performances for the Hugging Face community, we have built integrated testing of Hugging Face open source libraries on AMD Instinct GPUs in our datacenters - and were able to minimize the carbon impact of these new workloads working with Verne Global to deploy the AMD Instinct servers in [Iceland](https://verneglobal.com/about-us/locations/iceland/). On top of native support, another major aspect of our collaboration is to provide integration for the latest innovations and features available on AMD GPUs. Through the collaboration of Hugging Face team, AMD engineers and open source community members, we are happy to announce [support for](https://huggingface.co/docs/optimum/amd/index): * Flash Attention v2 from AMD Open Source efforts in [ROCmSoftwarePlatform/flash-attention](https://github.com/ROCmSoftwarePlatform/flash-attention) integrated natively in [Transformers](https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2) and [Text Generation Inference](https://huggingface.co/docs/text-generation-inference/quicktour). 
* Paged Attention from [vLLM](https://github.com/vllm-project/vllm/pull/1313), and various fused kernels available in [Text Generation Inference](https://huggingface.co/docs/text-generation-inference/quicktour) for ROCm. * [DeepSpeed](https://github.com/microsoft/DeepSpeed) for ROCm-powered GPUs using Transformers is also now officially validated and supported. * GPTQ, a common weight compression technique used to reduce the model memory requirements, is supported on ROCm GPUs through a direct integration with [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) and [Transformers](https://huggingface.co/blog/gptq-integration). * [Optimum-Benchmark](https://github.com/huggingface/optimum-benchmark), a utility to easily benchmark the performance of Transformers on AMD GPUs, in normal and distributed settings, with supported optimizations and quantization schemes. * Support of ONNX models execution on ROCm-powered GPUs using ONNX Runtime through the [ROCMExecutionProvider](https://onnxruntime.ai/docs/execution-providers/ROCm-ExecutionProvider.html) using [Optimum library](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/amdgpu). We are very excited to make these state of the art acceleration tools available and easy to use to Hugging Face users, and offer maintained support and performance with direct integration in our new continuous integration and development pipeline for AMD Instinct GPUs. One AMD Instinct MI250 GPU with 128 GB of High Bandwidth Memory has two distinct ROCm devices (GPU 0 and 1), each of them having 64 GB of High Bandwidth Memory. <br> <figure class="image table text-center m-0 w-full"> <img alt="" src="assets/optimum_amd/rocmsmi.png" /> <figcaption>MI250 two devices as displayed by `rocm-smi`</figcaption> </figure> <br> This means that with just one MI250 GPU card, we have two PyTorch devices that can be used very easily with tensor and data parallelism to achieve higher throughputs and lower latencies. In the rest of the blog post, we report performance results for the two steps involved during the text generation through large language models: * **Prefill latency**: The time it takes for the model to compute the representation for the user's provided input or prompt (also referred to as "Time To First Token"). * **Decoding per token latency**: The time it takes to generate each new token in an autoregressive manner after the prefill step. * **Decoding throughput**: The number of tokens generated per second during the decoding phase. Using [`optimum-benchmark`](https://github.com/huggingface/optimum-benchmark) and running [inference benchmarks](https://github.com/huggingface/optimum-benchmark/tree/main/examples/running-llamas) on an MI250 and an A100 GPU with and without optimizations, we get the following results: <br> <figure class="image table text-center m-0 w-full"> <img alt="" src="assets/optimum_amd/transformers_bench.png" /> <figcaption>Inference benchmarks using Transformers and PEFT libraries. FA2 stands for "Flash Attention 2", TP for "Tensor Parallelism", DDP for "Distributed Data Parallel".</figcaption> </figure> <br> In the plots above, we can see how performant the MI250 is, especially for production settings where requests are processed in big batches, delivering more than 2.33x more tokens (decode throughput) and taking half the time to the first token (prefill latency), compared to an A100 card. 
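As with the snippet at the beginning of this post, using these integrations requires no AMD-specific code. For example, the sketch below loads a model with Flash Attention 2 enabled; the model id is just an example, and depending on your `transformers` version the flag may be named differently (older releases used `use_flash_attention_2=True`).

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-2-7b-chat-hf"  # example model

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",  # picks up the ROCm Flash Attention kernels on AMD Instinct GPUs
    device_map="auto",
)
```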
Running [training benchmarks](https://github.com/huggingface/optimum-benchmark/tree/main/examples/training-llamas) as seen below, one MI250 card fits larger batches of training samples and reaches higher training throughput. <br> <figure class="image table text-center m-0 w-9/12"> <img alt="" src="assets/optimum_amd/training_bench.png" /> <figcaption>Training benchmark using Transformers library at maximum batch size (power of two) that can fit on a given card</figcaption> </figure> <br> ## Production Solutions Another important focus for our collaboration is to build support for Hugging Face production solutions, starting with Text Generation Inference (TGI). TGI provides an end-to-end solution to deploy large language models for inference at scale. Initially, TGI was mostly driven towards Nvidia GPUs, leveraging most of the recent optimizations made for post Ampere architecture, such as Flash Attention v1 and v2, GPTQ weight quantization and Paged Attention. Today, we are happy to announce initial support for AMD Instinct MI210 and MI250 GPUs in TGI, leveraging all the great open-source work detailed above, integrated in a complete end-to-end solution, ready to be deployed. Performance-wise, we spent a lot of time benchmarking Text Generation Inference on AMD Instinct GPUs to validate and discover where we should focus on optimizations. As such, and with the support of AMD GPUs Engineers, we have been able to achieve matching performance compared to what TGI was already offering. In this context, and with the long-term relationship we are building between AMD and Hugging Face, we have been integrating and testing with the AMD GeMM Tuner tool which allows us to tune the GeMM (matrix multiplication) kernels we are using in TGI to find the best setup towards increased performances. GeMM Tuner tool is expected to be released [as part of PyTorch](https://github.com/pytorch/pytorch/pull/114894) in a coming release for everyone to benefit from it. With all of the above being said, we are thrilled to show the very first performance numbers demonstrating the latest AMD technologies, putting Text Generation Inference on AMD GPUs at the forefront of efficient inferencing solutions with Llama model family. <br> <figure class="image table text-center m-0 w-full"> <img alt="" src="assets/optimum_amd/tgi_34b.png" /> <figcaption>TGI latency results for Llama 34B, comparing one AMD Instinct MI250 against A100-SXM4-80GB. As explained above one MI250 corresponds to two PyTorch devices.</figcaption> </figure> <br> <br> <figure class="image table text-center m-0 w-full"> <img alt="" src="assets/optimum_amd/tgi_70b.png" /> <figcaption>TGI latency results for Llama 70B, comparing two AMD Instinct MI250 against two A100-SXM4-80GB (using tensor parallelism)</figcaption> </figure> <br> Missing bars for A100 correspond to out of memory errors, as Llama 70B weights 138 GB in float16, and enough free memory is necessary for intermediate activations, KV cache buffer (>5GB for 2048 sequence length, batch size 8), CUDA context, etc. The Instinct MI250 GPU has 128 GB global memory while an A100 has 80GB which explains the ability to run larger workloads (longer sequences, larger batches) on MI250. Text Generation Inference is [ready to be deployed](https://huggingface.co/docs/text-generation-inference/quicktour) in production on AMD Instinct GPUs through the docker image `ghcr.io/huggingface/text-generation-inference:1.2-rocm`. 
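Once the container is running and serving a model, querying it from Python is hardware-agnostic. A minimal sketch with `huggingface_hub` is shown below; the endpoint URL and prompt are placeholders for your own deployment.

```python
from huggingface_hub import InferenceClient

# Placeholder URL: point it at wherever the ROCm TGI container is listening
client = InferenceClient(model="http://127.0.0.1:8080")

output = client.text_generation("What is deep learning?", max_new_tokens=64)
print(output)
```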
Make sure to refer to the [documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware) concerning the support and its limitations.

## What's next?

We hope this blog post got you as excited as we are at Hugging Face about this partnership with AMD. Of course, this is just the very beginning of our journey, and we look forward to enabling more use cases on more AMD hardware.

In the coming months, we will be working on bringing more support and validation for AMD Radeon GPUs, the same GPUs you can put in your own desktop for local usage, lowering the accessibility barrier and paving the way for even more versatility for our users.

We will also soon be working on performance optimization for the MI300 lineup, ensuring that both our open source libraries and our solutions deliver the latest innovations at the high level of stability we always aim for at Hugging Face.

Another area of focus for us will be AMD Ryzen AI technology, which powers the latest generation of AMD laptop CPUs and allows AI to run at the edge, on the device. At a time when coding assistants, image generation tools, and personal assistants are becoming more broadly available, it is important to offer solutions that can meet privacy needs while leveraging these powerful tools. In this context, Ryzen AI compatible models are already being made available on the [Hugging Face Hub](https://huggingface.co/models?other=RyzenAI), and we're working closely with AMD to bring more of them in the coming months.
blog/huggingface-and-optimum-amd.md/0
{ "file_path": "blog/huggingface-and-optimum-amd.md", "repo_id": "blog", "token_count": 3115 }
35
--- title: "Instruction-tuning Stable Diffusion with InstructPix2Pix" thumbnail: assets/instruction_tuning_sd/thumbnail.png authors: - user: sayakpaul --- # Instruction-tuning Stable Diffusion with InstructPix2Pix This post explores instruction-tuning to teach [Stable Diffusion](https://huggingface.co/blog/stable_diffusion) to follow instructions to translate or process input images. With this method, we can prompt Stable Diffusion using an input image and an “instruction”, such as - *Apply a cartoon filter to the natural image*. | ![schematic](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/schematic.png) | |:--:| | **Figure 1**: We explore the instruction-tuning capabilities of Stable Diffusion. In this figure, we prompt an instruction-tuned Stable Diffusion system with prompts involving different transformations and input images. The tuned system seems to be able to learn these transformations stated in the input prompts. Figure best viewed in color and zoomed in. | This idea of teaching Stable Diffusion to follow user instructions to perform **edits** on input images was introduced in [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://huggingface.co/papers/2211.09800). We discuss how to extend the InstructPix2Pix training strategy to follow more specific instructions related to tasks in image translation (such as cartoonization) and low-level image processing (such as image deraining). We cover: - [Introduction to instruction-tuning](#introduction-and-motivation) - [The motivation behind this work](#introduction-and-motivation) - [Dataset preparation](#dataset-preparation) - [Training experiments and results](#training-experiments-and-results) - [Potential applications and limitations](#potential-applications-and-limitations) - [Open questions](#open-questions) Our code, pre-trained models, and datasets can be found [here](https://github.com/huggingface/instruction-tuned-sd). ## Introduction and motivation Instruction-tuning is a supervised way of teaching language models to follow instructions to solve a task. It was introduced in [Fine-tuned Language Models Are Zero-Shot Learners](https://huggingface.co/papers/2109.01652) (FLAN) by Google. From recent times, you might recall works like [Alpaca](https://crfm.stanford.edu/2023/03/13/alpaca.html) and [FLAN V2](https://huggingface.co/papers/2210.11416), which are good examples of how beneficial instruction-tuning can be for various tasks. The figure below shows a formulation of instruction-tuning (also called “instruction-finetuning”). In the [FLAN V2 paper](https://huggingface.co/papers/2210.11416), the authors take a pre-trained language model ([T5](https://huggingface.co/docs/transformers/model_doc/t5), for example) and fine-tune it on a dataset of exemplars, as shown in the figure below. | ![flan_schematic](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/flan_schematic.png) | |:--:| | **Figure 2**: FLAN V2 schematic (figure taken from the FLAN V2 paper). | With this approach, one can create exemplars covering many different tasks, which makes instruction-tuning a multi-task training objective: | **Input** | **Label** | **Task** | |---|---|---| | Predict the sentiment of the<br>following sentence: “The movie<br>was pretty amazing. I could not<br>turn around my eyes even for a<br>second.” | Positive | Sentiment analysis /<br>Sequence classification | | Please answer the following<br>question. 
<br>What is the boiling point of<br>Nitrogen? | 320.4F | Question answering | | Translate the following<br>English sentence into German: “I have<br>a cat.” | Ich habe eine Katze. | Machine translation | | … | … | … | | | | | | Using a similar philosophy, the authors of FLAN V2 conduct instruction-tuning on a mixture of thousands of tasks and achieve zero-shot generalization to unseen tasks: | ![flan_dataset_overview](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/flan_dataset_overview.png) | |:--:| | **Figure 3**: FLAN V2 training and test task mixtures (figure taken from the FLAN V2 paper). | Our motivation behind this work comes partly from the FLAN line of work and partly from InstructPix2Pix. We wanted to explore if it’s possible to prompt Stable Diffusion with specific instructions and input images to process them as per our needs. The [pre-trained InstructPix2Pix models](https://huggingface.co/timbrooks/instruct-pix2pix) are good at following general instructions, but they may fall short of following instructions involving specific transformations: | ![cartoonization_results](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/cartoonization_results.jpeg) | |:--:| | **Figure 4**: We observe that for the input images (left column), our models (right column) more faithfully perform “cartoonization” compared to the pre-trained InstructPix2Pix models (middle column). It is interesting to note the results of the first row where the pre-trained InstructPix2Pix models almost fail significantly. Figure best viewed in color and zoomed in. See original [here](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/Instruction-tuning-sd/cartoonization_results.png). | But we can still leverage the findings from InstructPix2Pix to suit our customizations. On the other hand, paired datasets for tasks like [cartoonization](https://github.com/SystemErrorWang/White-box-Cartoonization), [image denoising](https://paperswithcode.com/dataset/sidd), [image deraining](https://paperswithcode.com/dataset/raindrop), etc. are available publicly, which we can use to build instruction-prompted datasets taking inspiration from FLAN V2. Doing so allows us to transfer the instruction-templating ideas explored in FLAN V2 to this work. ## Dataset preparation ### Cartoonization In our early experiments, we prompted InstructPix2Pix to perform cartoonization and the results were not up to our expectations. We tried various inference-time hyperparameter combinations (such as image guidance scale and the number of inference steps), but the results still were not compelling. This motivated us to approach the problem differently. As hinted in the previous section, we wanted to benefit from both worlds: **(1)** training methodology of InstructPix2Pix and **(2)** the flexibility of creating instruction-prompted dataset templates from FLAN. We started by creating an instruction-prompted dataset for the task of cartoonization. Figure 5 presents our dataset creation pipeline: | ![itsd_data_wheel](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/itsd_data_wheel.png) | |:--:| | **Figure 5**: A depiction of our dataset creation pipeline for cartoonization (best viewed in color and zoomed in). | In particular, we: 1. 
Ask [ChatGPT](https://openai.com/blog/chatgpt) to generate 50 synonymous sentences for the following instruction: "Cartoonize the image.” 2. We then use a random sub-set (5000 samples) of the [Imagenette dataset](https://github.com/fastai/imagenette) and leverage a pre-trained [Whitebox CartoonGAN](https://github.com/SystemErrorWang/White-box-Cartoonization) model to produce the cartoonized renditions of those images. The cartoonized renditions are the labels we want our model to learn from. So, in a way, this corresponds to transferring the biases learned by the Whitebox CartoonGAN model to our model. 3. Then we create our exemplars in the following format: | ![cartoonization_dataset_overview](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/cartoonization_dataset_overview.png) | |:--:| | **Figure 6**: Samples from the final cartoonization dataset (best viewed in color and zoomed in). | Our final dataset for cartoonization can be found [here](https://huggingface.co/datasets/instruction-tuning-vision/cartoonizer-dataset). For more details on how the dataset was prepared, refer to [this directory](https://github.com/huggingface/instruction-tuned-sd/tree/main/data_preparation). We experimented with this dataset by fine-tuning InstructPix2Pix and got promising results (more details in the “Training experiments and results” section). We then proceeded to see if we could generalize this approach to low-level image processing tasks such as image deraining, image denoising, and image deblurring. ### Low-level image processing We focus on the common low-level image processing tasks explored in [MAXIM](https://huggingface.co/papers/2201.02973). In particular, we conduct our experiments for the following tasks: deraining, denoising, low-light image enhancement, and deblurring. We took different number of samples from the following datasets for each task and constructed a single dataset with prompts added like so: | **Task** | **Prompt** | **Dataset** | **Number of samples** | |---|---|---|---| | Deblurring | “deblur the blurry image” | [REDS](https://seungjunnah.github.io/Datasets/reds.html) (`train_blur`<br>and `train_sharp`) | 1200 | | Deraining | “derain the image” | [Rain13k](https://github.com/megvii-model/HINet#image-restoration-tasks) | 686 | | Denoising | “denoise the noisy image” | [SIDD](https://www.eecs.yorku.ca/~kamel/sidd/) | 8 | | Low-light<br>image enhancement | "enhance the low-light image” | [LOL](https://paperswithcode.com/dataset/lol) | 23 | | | | | | Datasets mentioned above typically come as input-output pairs, so we do not have to worry about the ground-truth. Our final dataset is available [here](https://huggingface.co/datasets/instruction-tuning-vision/instruct-tuned-image-processing). The final dataset looks like so: | ![low_level_img_proc_dataset_overview](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/low_level_img_proc_dataset_overview.png) | |:--:| | **Figure 7**: Samples from the final low-level image processing dataset (best viewed in color and zoomed in). | Overall, this setup helps draw parallels from the FLAN setup, where we create a mixture of different tasks. This also helps us train a single model one time, performing well to the different tasks we have in the mixture. This varies significantly from what is typically done in low-level image processing. 
Works like MAXIM introduce a single model architecture capable of modeling the different low-level image processing tasks, but training happens independently on the individual datasets. ## Training experiments and results We based our training experiments on [this script](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py). Our training logs (including validation samples and training hyperparameters) are available on Weight and Biases: - [Cartoonization](https://wandb.ai/sayakpaul/instruction-tuning-sd/runs/wszjpb1b) ([hyperparameters](https://wandb.ai/sayakpaul/instruction-tuning-sd/runs/wszjpb1b/overview?workspace=)) - [Low-level image processing](https://wandb.ai/sayakpaul/instruction-tuning-sd/runs/2kg5wohb) ([hyperparameters](https://wandb.ai/sayakpaul/instruction-tuning-sd/runs/2kg5wohb/overview?workspace=)) When training, we explored two options: 1. Fine-tuning from an existing [InstructPix2Pix checkpoint](https://huggingface.co/timbrooks/instruct-pix2pix) 2. Fine-tuning from an existing [Stable Diffusion checkpoint](https://huggingface.co/runwayml/stable-diffusion-v1-5) using the InstructPix2Pix training methodology In our experiments, we found out that the first option helps us adapt to our datasets faster (in terms of generation quality). For more details on the training and hyperparameters, we encourage you to check out [our code](https://github.com/huggingface/instruction-tuned-sd) and the respective run pages on Weights and Biases. ### Cartoonization results For testing the [instruction-tuned cartoonization model](https://huggingface.co/instruction-tuning-sd/cartoonizer), we compared the outputs as follows: | ![cartoonization_full_results](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/cartoonization_full_results.png) | |:--:| | **Figure 8**: We compare the results of our instruction-tuned cartoonization model (last column) with that of a [CartoonGAN](https://github.com/SystemErrorWang/White-box-Cartoonization) model (column two) and the pre-trained InstructPix2Pix model (column three). It’s evident that the instruction-tuned model can more faithfully match the outputs of the CartoonGAN model. Figure best viewed in color and zoomed in. See original [here](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/Instruction-tuning-sd/cartoonization_full_results.png). | To gather these results, we sampled images from the `validation` split of ImageNette. We used the following prompt when using our model and the pre-trained InstructPix2Pix model: *“Generate a cartoonized version of the image”.* For these two models, we kept the `image_guidance_scale` and `guidance_scale` to 1.5 and 7.0, respectively, and number of inference steps to 20. Indeed more experimentation is needed around these hyperparameters to study how they affect the results of the pre-trained InstructPix2Pix model, in particular. More comparative results are available [here](https://wandb.ai/sayakpaul/instruction-tuning-sd/runs/g6cvggw2). Our code for comparing these models is available [here](https://github.com/huggingface/instruction-tuned-sd/blob/main/validation/compare_models.py). Our model, however, [fails to produce](https://wandb.ai/sayakpaul/instruction-tuning-sd/runs/g6cvggw2) the expected outputs for the classes from ImageNette, which it has not seen enough during training. This is somewhat expected, and we believe this could be mitigated by scaling the training dataset. 
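For readers who want to try the cartoonization checkpoint themselves, a minimal inference sketch is shown below. It assumes the released checkpoint can be loaded with the standard InstructPix2Pix pipeline from `diffusers`, and the input image path is a placeholder.

```python
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "instruction-tuning-sd/cartoonizer", torch_dtype=torch.float16
).to("cuda")

image = load_image("path/or/url/to/your/image.png")  # placeholder input
edited_image = pipeline(
    "Generate a cartoonized version of the image",
    image=image,
    num_inference_steps=20,        # same settings as reported above
    image_guidance_scale=1.5,
    guidance_scale=7.0,
).images[0]
edited_image.save("cartoonized.png")
```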
### Low-level image processing results For low-level image processing ([our model](https://huggingface.co/instruction-tuning-sd/low-level-img-proc)), we follow the same inference-time hyperparameters as above: - Number of inference steps: 20 - Image guidance scale: 1.5 - Guidance scale: 7.0 For deraining, our model provides compelling results when compared to the ground-truth and the output of the pre-trained InstructPix2Pix model: | ![deraining_results](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/deraining_results.png) | |:--:| | **Figure 9**: Deraining results (best viewed in color and zoomed in). Inference prompt: “derain the image” (same as the training set). See original [here](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/Instruction-tuning-sd/deraining_results.png). | However, for low-light image enhancement, it leaves a lot to be desired: | ![image_enhancement_results](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/image_enhancement_results.png) | |:--:| | **Figure 10**: Low-light image enhancement results (best viewed in color and zoomed in). Inference prompt: “enhance the low-light image” (same as the training set). See original [here](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/Instruction-tuning-sd/image_enhancement_results.png). | This failure, perhaps, can be attributed to our model not seeing enough exemplars for the task and possibly from better training. We notice similar findings for deblurring as well: | ![deblurring_results](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/instruction-tuning-sd/deblurring_results.png) | |:--:| | **Figure 11**: Deblurring results (best viewed in color and zoomed in). Inference prompt: “deblur the image” (same as the training set). See original [here](https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/Instruction-tuning-sd/deblurring_results.png). | We believe there is an opportunity for the community to explore how much the task mixture for low-level image processing affects the end results. *Does increasing the task mixture with more representative samples help improve the end results?* We leave this question for the community to explore further. You can try out the interactive demo below to make Stable Diffusion follow specific instructions: <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.29.0/gradio.js"></script> <gradio-app theme_mode="light" src="https://instruction-tuning-sd-instruction-tuned-sd.hf.space"></gradio-app> ## Potential applications and limitations In the world of image editing, there is a disconnect between what a domain expert has in mind (the tasks to be performed) and the actions needed to be applied in editing tools (such as [Lightroom](https://www.adobe.com/in/products/photoshop-lightroom.html)). Having an easy way of translating natural language goals to low-level image editing primitives would be a seamless user experience. With the introduction of mechanisms like InstructPix2Pix, it’s safe to say that we’re getting closer to that realm. However, challenges still remain: - These systems need to work for large high-resolution original images. - Diffusion models often invent or re-interpret an instruction to perform the modifications in the image space. For a realistic image editing application, this is unacceptable. 
## Open questions We acknowledge that our experiments are preliminary. We did not go deep into ablating the apparent factors in our experiments. Hence, here we enlist a few open questions that popped up during our experiments: - ***What happens we scale up the datasets?*** How does that impact the quality of the generated samples? We experimented with a handful of examples. For comparison, InstructPix2Pix was trained on more than 30000 samples. - ***What is the impact of training for longer, especially when the task mixture is broader?*** In our experiments, we did not conduct hyperparameter tuning, let alone an ablation on the number of training steps. - ***How does this approach generalize to a broader mixture of tasks commonly done in the “instruction-tuning” world?*** We only covered four tasks for low-level image processing: deraining, deblurring, denoising, and low-light image enhancement. Does adding more tasks to the mixture with more representative samples help the model generalize to unseen tasks or, perhaps, a combination of tasks (example: “Deblur the image and denoise it”)? - ***Does using different variations of the same instruction on-the-fly help improve performance?*** For cartoonization, we randomly sampled an instruction from the set of ChatGPT-generated synonymous instructions **during** dataset creation. But what happens when we perform random sampling during training instead? For low-level image processing, we used fixed instructions. What happens when we follow a similar methodology of using synonymous instructions for each task and input image? - ***What happens when we use ControlNet training setup, instead?*** [ControlNet](https://huggingface.co/papers/2302.05543) also allows adapting a pre-trained text-to-image diffusion model to be conditioned on additional images (such as semantic segmentation maps, canny edge maps, etc.). If you’re interested, then you can use the datasets presented in this post and perform ControlNet training referring to [this post](https://huggingface.co/blog/train-your-controlnet). ## Conclusion In this post, we presented our exploration of “instruction-tuning” of Stable Diffusion. While pre-trained InstructPix2Pix are good at following general image editing instructions, they may break when presented with more specific instructions. To mitigate that, we discussed how we prepared our datasets for further fine-tuning InstructPix2Pix and presented our results. As noted above, our results are still preliminary. But we hope this work provides a basis for the researchers working on similar problems and they feel motivated to explore the open questions further. ## Links - Training and inference code: [https://github.com/huggingface/instruction-tuned-sd](https://github.com/huggingface/instruction-tuned-sd) - Demo: [https://huggingface.co/spaces/instruction-tuning-sd/instruction-tuned-sd](https://huggingface.co/spaces/instruction-tuning-sd/instruction-tuned-sd) - InstructPix2Pix: [https://huggingface.co/timbrooks/instruct-pix2pix](https://huggingface.co/timbrooks/instruct-pix2pix) - Datasets and models from this post: [https://huggingface.co/instruction-tuning-sd](https://huggingface.co/instruction-tuning-sd) *Thanks to [Alara Dirik](https://www.linkedin.com/in/alaradirik/) and [Zhengzhong Tu](https://www.linkedin.com/in/zhengzhongtu) for the helpful discussions. 
Thanks to [Pedro Cuenca](https://twitter.com/pcuenq?lang=en) and [Kashif Rasul](https://twitter.com/krasul?lang=en) for their helpful reviews on the post.* ## Citation To cite this work, please use the following citation: ```bibtex @article{ Paul2023instruction-tuning-sd, author = {Paul, Sayak}, title = {Instruction-tuning Stable Diffusion with InstructPix2Pix}, journal = {Hugging Face Blog}, year = {2023}, note = {https://huggingface.co/blog/instruction-tuning-sd}, } ```
blog/instruction-tuning-sd.md/0
{ "file_path": "blog/instruction-tuning-sd.md", "repo_id": "blog", "token_count": 6071 }
36
--- title: "Introducing ConTextual: How well can your Multimodal model jointly reason over text and image in text-rich scenes?" thumbnail: /blog/assets/leaderboards-on-the-hub/thumbnail_contextual.png authors: - user: rohan598 guest: true - user: hbXNov guest: true - user: kaiweichang guest: true - user: violetpeng guest: true - user: clefourrier --- # Introducing ConTextual: How well can your Multimodal model jointly reason over text and image in text-rich scenes? Models are becoming quite good at understanding text on its own, but what about text in images, which gives important contextual information? For example, navigating a map, or understanding a meme? The ability to reason about the interactions between the text and visual context in images can power many real-world applications, such as AI assistants, or tools to assist the visually impaired. We refer to these tasks as "context-sensitive text-rich visual reasoning tasks". At the moment, most evaluations of instruction-tuned large multimodal models (LMMs) focus on testing how well models can respond to human instructions posed as questions or imperative sentences (“Count this”, “List that”, etc) over images... but not how well they understand context-sensitive text-rich scenes! That’s why we (researchers from University of California Los Angeles) created ConTextual, a Context-sensitive Text-rich visuaL reasoning dataset for evaluating LMMs. We also released a leaderboard, so that the community can see for themselves which models are the best at this task. <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.45.1/gradio.js"> </script> <gradio-app theme_mode="light" space="ucla-contextual/contextual_leaderboard"></gradio-app> For an in-depth dive, you can also check these additional resources: [paper](https://arxiv.org/abs/2401.13311), [code](https://github.com/rohan598/ConTextual), [dataset](https://huggingface.co/datasets/ucla-contextual/contextual_all), [validation dataset](https://huggingface.co/datasets/ucla-contextual/contextual_val), and [leaderboard](https://huggingface.co/spaces/ucla-contextual/contextual_leaderboard). ## What is ConTextual ConTextual is a Context-sensitive Text-rich visual reasoning dataset consisting of 506 challenging instructions for LMM evaluation. We create a diverse set of instructions on text-rich images with the constraint that they should require context-sensitive joint reasoning over the textual and visual cues in the image. It covers 8 real-world visual scenarios - Time Reading, Shopping, Navigation, Abstract Scenes, Mobile Application, Webpages, Infographics and Miscellaneous Natural Scenes. (See the figure for a sample of each dataset). ![Real world visual scenarios examples](https://con-textual.github.io/static/images/teaser_figure.png) Each sample consists of: - A text-rich image - A human-written instruction (question or imperative task) - A human-written reference response The dataset is released in two forms: - (a) a validation set of 100 instances from the complete dataset with instructions, images, and reference answers to the instructions. - (b) a test dataset with instructions and images only. The leaderboard contains model results both on the validation and test datasets (the information is also present in the paper). The development set allows the practitioners to test and iterate on their approaches easily. The evaluation sandbox is present in our github. ## Experiments For our initial experiments, our benchmark assessed the performance of 13 models. 
We divided them into three categories:
- **Augmented LLM approach**: GPT-4 + visual information in the form of OCR of the image and/or dense image captions;
- **Closed-Source LMMs**: GPT-4V(ision) and Gemini-Vision-Pro;
- **Open-Source LMMs**: LLaVA-v1.5-13B, ShareGPT4V-7B, Instruct-Blip-Vicuna-7B, mPlugOwl-v2-7B, Bliva-Vicuna-7B, Qwen-VL-7B and Idefics-9B.

Our dataset includes a reference response for each instruction, allowing us to test various automatic evaluation methods. For evaluation, we use an LLM-as-a-judge approach: we prompt GPT-4 with the instruction, reference response, and predicted response, and the model has to return whether the predicted response is acceptable or not. (GPT-4 was chosen as it correlated the most with human judgement in our experiments.)

Let's look at some examples!

[Example 1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/leaderboards-on-the-hub/contextual-qualitative-ex-1.png)

In this instance, GPT-4V provides an incorrect response to the instruction, despite its logical reasoning. The use of green indicates responses that match the reference, while red highlights errors in the responses. Additionally, a Summarized Reasoning is provided to outline the rationale used by GPT-4V to arrive at its answer.

[Example 2](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/leaderboards-on-the-hub/contextual-qualitative-ex-2.png)

In this example, GPT-4V correctly responds to the instruction. However, ShareGPT-4V-7B (the best performing open-source LMM) and GPT-4 w/ Layout-aware OCR + Caption (Augmented LLM) produce a wrong response, due to a lack of joint reasoning over text and image.

You'll find more examples like this in the Appendix section of our [paper](https://arxiv.org/abs/2401.13311)!

## Key Takeaways!

While working on this, we found that:

- Modern LMMs (proprietary and open models) struggle to perform well on the ConTextual dataset, while humans are good at it, hinting at ample room for model improvement on reasoning over text-rich images, a domain with significant real-world applications.
- Proprietary LMMs perform poorly in infographics reasoning that involves time reading, indicating a gap in their capabilities compared to humans. Notably, GPT-4V, the best performing model, surpasses humans in abstract reasoning, potentially due to exposure to memes and quotes data, but struggles in time-related tasks where humans excel.
- For open-source models such as LLaVA-1.5-13B and ShareGPT-4V-7B, there is a strong gap between the domains on which they achieve acceptable human ratings (abstract and natural scene contexts) and the other domains (time-reading, infographics, navigation, shopping, web, and mobile usage). It's therefore likely that many of the domains we cover in our samples are out-of-distribution for these models. Open-source models should thus aim to increase the diversity of their training data.
- Augmenting a Large Language Model with visual information converted into text via OCR or captions performs notably badly, with a human approval rate of 17.2%. Our samples need a combination of precise visual perception and fine-grained, nuanced vision-language alignment to be solved.

Our analysis suggests promising next steps include:

- developing enhanced image encoders,
- creating highly accurate image descriptions,
- facilitating fine-grained vision-language alignment to improve the model's perception and mitigate the occurrence of hallucinations.
This, in turn, will lead to more effective context-sensitive text-rich visual reasoning. ## What’s next? We’d love to evaluate your models too, to help collectively advance the state of vision language models! To submit, please follow our guidelines below. We hope that this benchmark will help in developing nuanced vision-language alignment techniques and welcome any kind of collaboration! You can contact us here: [Rohan](rwadhawan7@g.ucla.edu) and [Hritik](hbansal@g.ucla.edu), and know more about the team here: [Rohan](https://web.cs.ucla.edu/~rwadhawan7/), [Hritik](https://sites.google.com/view/hbansal), [Kai-Wei Chang](https://web.cs.ucla.edu/~kwchang/), [Nanyun (Violet) Peng](https://vnpeng.net/). ## How to Submit? We are accepting submissions for both the test and validation sets. Please, follow the corresponding procedure below. ### Validation Set Submission To submit your validation results to the leaderboard, you can run our auto-evaluation code (Evaluation Pipeline with GPT4), following [these instructions](https://github.com/rohan598/ConTextual?tab=readme-ov-file#-evaluation-pipeline-gpt-4). We expect submissions to be json format as shown below: ```json {"model_name": {"img_url": "The boolean score of your model on the image, 1 for success and 0 for failure"}} ``` - Replace model name with your model name (string) - Replace img_url with img_url of the instance (string) - Value for an img url is either 0 or 1 (int) There should be 100 predictions, corresponding to the 100 urls of the val set. To make the submission please go to the [leaderboard](https://huggingface.co/spaces/ucla-contextual/contextual_leaderboard) hosted on HuggingFace and fill up the Submission form. ### Test Set Submission Once you are happy with your validation results, you can send your model predictions to [Rohan](rwadhawan7@g.ucla.edu) and [Hritik](hbansal@g.ucla.edu). Please include in your email: - A name for your model. - Organization (affiliation). - (Optionally) GitHub repo or paper link. We expect submissions to be json format similar to val set as shown below: ```json {"model_name": {"img_url": "predicted response"}} ``` - Replace model name with your model name (string) - Replace img_url with img_url of the instance (string) - Value for an img url is the predicted response for that instance (string) There should be 506 predictions, corresponding to the 506 urls of the test set.
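As a reference, here is a small sketch of how such a submission file could be assembled in Python; the model name, urls, and responses below are placeholders only.

```python
import json

# Map each test image url to your model's predicted response (506 entries in total)
predictions = {
    "https://example.com/contextual/image_001.png": "The store closes at 9 pm on Fridays.",
    # ...
}

submission = {"my-model-name": predictions}  # replace with your model's name

with open("contextual_test_submission.json", "w") as f:
    json.dump(submission, f, indent=2)
```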
blog/leaderboard-contextual.md/0
{ "file_path": "blog/leaderboard-contextual.md", "repo_id": "blog", "token_count": 2523 }
37
--- title: "Welcome Llama 3 - Meta's new open LLM" thumbnail: /blog/assets/llama3/thumbnail.jpg authors: - user: philschmid - user: osanseviero - user: pcuenq - user: ybelkada - user: lvwerra --- # Welcome Llama 3 - Meta’s new open LLM ## Introduction Meta’s Llama 3, the next iteration of the open-access Llama family, is now released and available at Hugging Face. It's great to see Meta continuing its commitment to open AI, and we’re excited to fully support the launch with comprehensive integration in the Hugging Face ecosystem. Llama 3 comes in two sizes: 8B for efficient deployment and development on consumer-size GPU, and 70B for large-scale AI native applications. Both come in base and instruction-tuned variants. In addition to the 4 models, a new version of Llama Guard was fine-tuned on Llama 3 8B and is released as Llama Guard 2 (safety fine-tune). We’ve collaborated with Meta to ensure the best integration into the Hugging Face ecosystem. You can find all 5 open-access models (2 base models, 2 fine-tuned & Llama Guard) on the Hub. Among the features and integrations being released, we have: - [Models on the Hub](https://huggingface.co/meta-llama), with their model cards and licenses - 🤗 Transformers integration - [Hugging Chat integration for Meta Llama 3 70b](https://huggingface.co/chat/models/meta-llama/Meta-Llama-3-70B-instruct) - Inference Integration into Inference Endpoints, Google Cloud & Amazon SageMaker - An example of fine-tuning Llama 3 8B on a single GPU with 🤗 TRL ## Table of contents - [What’s new with Llama 3?](#whats-new-with-llama-3) - [Llama 3 evaluation](#llama-3-evaluation) - [How to prompt Llama 3](#how-to-prompt-llama-3) - [Demo](#demo) - [Using 🤗 Transformers](#using-🤗-transformers) - [Inference Integrations](#inference-integrations) - [Fine-tuning with 🤗 TRL](#fine-tuning-with-🤗-trl) - [Additional Resources](#additional-resources) - [Acknowledgments](#acknowledgments) ## What’s new with Llama 3? The Llama 3 release introduces 4 new open LLM models by Meta based on the Llama 2 architecture. They come in two sizes: 8B and 70B parameters, each with base (pre-trained) and instruct-tuned versions. All the variants can be run on various types of consumer hardware and have a context length of 8K tokens. - [Meta-Llama-3-8b](https://huggingface.co/meta-llama/Meta-Llama-3-8B): Base 8B model - [Meta-Llama-3-8b-instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct): Instruct fine-tuned version of the base 8b model - [Meta-Llama-3-70b](https://huggingface.co/meta-llama/Meta-Llama-3-70B): Base 70B model - [Meta-Llama-3-70b-instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-instruct): Instruct fine-tuned version of the base 70b model In addition to these 4 base models, Llama Guard 2 was also released. Fine-tuned on Llama 3 8B, it’s the latest iteration in the Llama Guard family. Llama Guard 2, built for production use cases, is designed to classify LLM inputs (prompts) as well as LLM responses in order to detect content that would be considered unsafe in a risk taxonomy. A big change in Llama 3 compared to Llama 2 is the use of a new tokenizer that expands the vocabulary size to 128,256 (from 32K tokens in the previous version). This larger vocabulary can encode text more efficiently (both for input and output) and potentially yield stronger multilingualism. 
This comes at a cost, though: the embedding input and output matrices are larger, which accounts for a good portion of the parameter count increase of the small model, from 7B in Llama 2 to 8B in Llama 3. In addition, the 8B version of the model now uses Grouped-Query Attention (GQA), which is an efficient representation that should help with longer contexts.

The Llama 3 models were trained on over 15 trillion tokens (roughly 8x more data than Llama 2) from a new mix of publicly available online data, on two clusters with 24,000 GPUs. We don't know the exact details of the training mix, and we can only guess that bigger and more careful data curation was a big factor in the improved performance. Llama 3 Instruct has been optimized for dialogue applications and was trained on over 10 million human-annotated data samples with a combination of supervised fine-tuning (SFT), rejection sampling, proximal policy optimization (PPO), and direct preference optimization (DPO).

Regarding the licensing terms, Llama 3 comes with a permissive license that allows redistribution, fine-tuning, and derivative works. The requirement for explicit attribution is new in the Llama 3 license and was not present in Llama 2. Derived models, for instance, need to include "Llama 3" at the beginning of their name, and you also need to mention "Built with Meta Llama 3" in derivative works or services. For full details, please make sure to read the [official license](https://huggingface.co/meta-llama/Meta-Llama-3-70B/blob/main/LICENSE).

## Llama 3 evaluation

_Note: We are currently evaluating Meta Llama 3 individually and will update this section as soon as we get the results._

## How to prompt Llama 3

The base models have no prompt format. Like other base models, they can be used to continue an input sequence with a plausible continuation or for zero-shot/few-shot inference. They are also a great foundation for fine-tuning on your own use cases. The Instruct versions use the following conversation structure:

```bash
<|begin_of_text|><|start_header_id|>system<|end_header_id|>

{{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|>

{{ user_msg_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|>

{{ model_answer_1 }}<|eot_id|>
```

This format has to be reproduced exactly for effective use. We'll later show how easy it is to reproduce the instruct prompt with the chat template available in `transformers`.

## Demo

You can chat with the Llama 3 70B Instruct model on Hugging Chat! Check out the link here: https://huggingface.co/chat/models/meta-llama/Meta-Llama-3-70B-instruct

## Using 🤗 Transformers

With Transformers [release 4.40](https://github.com/huggingface/transformers/releases/tag/v4.40.0), you can use Llama 3 and leverage all the tools within the Hugging Face ecosystem, such as:

- training and inference scripts and examples
- safe file format (`safetensors`)
- integrations with tools such as bitsandbytes (4-bit quantization), PEFT (parameter-efficient fine-tuning), and Flash Attention 2
- utilities and helpers to run generation with the model
- mechanisms to export the models for deployment

In addition, Llama 3 models are compatible with `torch.compile()` with CUDA graphs, giving them a ~4x speedup at inference time!

To use Llama 3 models with transformers, make sure to use the latest `transformers` release:

```bash
pip install -U "transformers==4.40.0"
```

The following snippet shows how to use `Llama-3-8b-instruct` with transformers. It requires about 16 GB of memory, which fits consumer GPUs such as the 3090 or 4090.
```python
import transformers
import torch

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="cuda",
)

messages = [
    {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
    {"role": "user", "content": "Who are you?"},
]

prompt = pipeline.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True
)

terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = pipeline(
    prompt,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
print(outputs[0]["generated_text"][len(prompt):])
```

> Arrrr, me hearty! Me name be Captain Chat, the scurviest pirate chatbot to ever sail the Seven Seas! Me be here to swab the decks o' yer mind with me trusty responses, savvy? I be ready to hoist the Jolly Roger and set sail fer a swashbucklin' good time, matey! So, what be bringin' ye to these fair waters?

A couple of details:

- We loaded the model in `bfloat16`. This is the type used by the original checkpoint published by Meta, so it's the recommended way to run inference to ensure the best precision or to conduct evaluations. For real-world use, it's also safe to use `float16`, which may be faster depending on your hardware.
- Assistant responses may end with the special token `<|eot_id|>`, but we must also stop generation if the regular EOS token is found. We can stop generation early by providing a list of terminators in the `eos_token_id` parameter.
- We used the default sampling parameters (`temperature` and `top_p`) taken from the original Meta codebase. We haven't had time to conduct extensive tests yet, so feel free to explore!

You can also automatically quantize the model, loading it in 8-bit or even 4-bit mode. 4-bit loading takes about 7 GB of memory to run, making it compatible with a lot of consumer cards and all the GPUs in Google Colab. This is how you'd load the generation pipeline in 4-bit:

```python
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={
        "torch_dtype": torch.float16,
        "quantization_config": {"load_in_4bit": True},
        "low_cpu_mem_usage": True,
    },
)
```

For more details on using the models with transformers, please check [the model cards](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct).

## Inference Integrations

In this section, we'll go through different approaches to running inference with the Llama 3 models. Before using these models, make sure you have requested access to one of the models in the official [Meta Llama 3](https://TODO) repositories.

### Integration with Inference Endpoints

You can deploy Llama 3 on Hugging Face's [Inference Endpoints](https://ui.endpoints.huggingface.co/), which uses Text Generation Inference as the backend. [Text Generation Inference](https://github.com/huggingface/text-generation-inference) is a production-ready inference container developed by Hugging Face to enable easy deployment of large language models. It has features such as continuous batching, token streaming, tensor parallelism for fast inference on multiple GPUs, and production-ready logging and tracing.
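As a rough sketch of what querying such an endpoint can look like once it is deployed (the deployment steps follow right below), you could stream tokens with the `huggingface_hub` client — the endpoint URL and token here are placeholders:

```python
from huggingface_hub import InferenceClient

# Placeholders: use the URL of your own Inference Endpoint and your HF token.
client = InferenceClient(
    model="https://<YOUR-ENDPOINT>.endpoints.huggingface.cloud",
    token="<HF_API_TOKEN>",
)

# Stream tokens from the deployed model as TGI generates them.
for token in client.text_generation(
    "Why is open-source software important?",
    max_new_tokens=256,
    stream=True,
):
    print(token, end="")
```

For the Instruct model you would typically apply the chat template to your messages first, as shown above; the snippet below shows an OpenAI-compatible alternative for calling the same endpoint.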
To deploy Llama 3, go to the [model page](https://huggingface.co/meta-llama/Meta-Llama-3-70B-instruct) and click on the [Deploy -> Inference Endpoints](https://ui.endpoints.huggingface.co/philschmid/new?repository=meta-llama/Meta-Llama-3-70B-instruct&vendor=aws&region=us-east-1&accelerator=gpu&instance_size=4xlarge&task=text-generation&no_suggested_compute=true&tgi=true&tgi_max_batch_prefill_tokens=16384&tgi_max_batch_total_tokens=16384&tgi_max_input_length=4000&tgi_max_total_tokens=8192) widget.

You can learn more about [Deploying LLMs with Hugging Face Inference Endpoints](https://huggingface.co/blog/inference-endpoints-llm) in a previous blog post. Inference Endpoints supports the [Messages API](https://huggingface.co/blog/tgi-messages-api) through Text Generation Inference, which allows you to switch from another closed model to an open one by simply changing the URL.

```python
from openai import OpenAI

# initialize the client but point it to TGI
client = OpenAI(
    base_url="<ENDPOINT_URL>" + "/v1/",  # replace with your endpoint url
    api_key="<HF_API_TOKEN>",  # replace with your token
)
chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {"role": "user", "content": "Why is open-source software important?"},
    ],
    stream=True,
    max_tokens=500
)

# iterate and print stream
for message in chat_completion:
    print(message.choices[0].delta.content, end="")
```

### Integration with Google Cloud

You can deploy Llama 3 on Google Cloud through Vertex AI or Google Kubernetes Engine (GKE), using [Text Generation Inference](https://huggingface.co/docs/text-generation-inference/index).

To deploy the Llama 3 model from Hugging Face, go to the [model page](https://huggingface.co/meta-llama/Meta-Llama-3-70B-instruct) and click on [Deploy -> Google Cloud](https://console.cloud.google.com/vertex-ai/publishers/meta-llama/model-garden/Meta-Llama-3-70B-instruct;hfSource=true;action=deploy). This will bring you to the Google Cloud Console, where you can 1-click deploy Llama 3 on Vertex AI or GKE.

### Integration with Amazon SageMaker

You can deploy and train Llama 3 on Amazon SageMaker through AWS Jumpstart or using the [Hugging Face LLM Container](https://huggingface.co/blog/sagemaker-huggingface-llm).

To deploy the Llama 3 model from Hugging Face, go to the [model page](https://huggingface.co/meta-llama/Meta-Llama-3-70B-instruct) and click on [Deploy -> Amazon SageMaker](https://huggingface.co/meta-llama/Meta-Llama-3-70B-instruct?sagemaker_deploy=true). This will display a code snippet you can copy and execute in your environment. Amazon SageMaker will then create a dedicated inference endpoint you can use to send requests.

## Fine-tuning with 🤗 TRL

Training LLMs can be technically and computationally challenging. In this section, we'll look at the tools available in the Hugging Face ecosystem to efficiently train Llama 3 on consumer-size GPUs. Below is an example command to fine-tune Llama 3 on the [No Robots dataset](https://huggingface.co/datasets/HuggingFaceH4/no_robots). We use 4-bit quantization and [QLoRA](https://arxiv.org/abs/2305.14314), and TRL's SFTTrainer will automatically format the dataset into `chatml` format. Let's get started! First, install the latest version of 🤗 TRL.
```bash
pip install -U transformers trl accelerate
```

If you just want to chat with the model in the terminal, you can use the `chat` command of the TRL CLI (for more info see the [docs](https://huggingface.co/docs/trl/en/clis#chat-interface)):

```bash
trl chat \
--model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
--device cuda \
--eos_tokens "<|end_of_text|>,<|eot_id|>"
```

You can also use the TRL CLI to run supervised fine-tuning (SFT) of Llama 3 on your own custom dataset. Use the `trl sft` command and pass your training arguments as CLI arguments. Make sure you are logged in and have access to the Llama 3 checkpoint. You can do this with `huggingface-cli login`.

```bash
trl sft \
--model_name_or_path meta-llama/Meta-Llama-3-8B \
--dataset_name HuggingFaceH4/no_robots \
--learning_rate 0.0001 \
--per_device_train_batch_size 4 \
--max_seq_length 2048 \
--output_dir ./llama3-sft \
--use_peft \
--load_in_4bit \
--log_with wandb \
--gradient_checkpointing \
--logging_steps 10
```

This will run the fine-tuning from your terminal; it takes about 4 hours to train on a single A10G, but can be easily parallelized by setting `--num_processes` to the number of GPUs you have available.

_Note: You can also replace the CLI arguments with a `yaml` file. Learn more about the TRL CLI [here](https://huggingface.co/docs/trl/clis#fine-tuning-with-the-cli)._

## Additional Resources

- [Models on the Hub](https://huggingface.co/collections/meta-llama/meta-llama-3-66214712577ca38149ebb2b6)
- [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
- [Chat demo on Hugging Chat](https://huggingface.co/chat/models/meta-llama/Llama-3-70b-instruct)
- [Meta Blog](https://ai.meta.com/blog/meta-llama-3/)
- [Google Cloud Vertex AI model garden](https://console.cloud.google.com/vertex-ai/publishers/meta/model-garden/llama3)

## Acknowledgments

Releasing such models with support and evaluations in the ecosystem would not be possible without the contributions of many community members, including:

- [Clémentine Fourrier](https://huggingface.co/clefourrier), [Nathan Habib](https://huggingface.co/SaylorTwift), and [Eleuther Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) for LLM evaluations
- [Olivier Dehaene](https://huggingface.co/olivierdehaene) and [Nicolas Patry](https://huggingface.co/Narsil) for [Text Generation Inference Support](https://github.com/huggingface/text-generation-inference)
- [Arthur Zucker](https://huggingface.co/ArthurZ) and [Lysandre Debut](https://huggingface.co/lysandre) for adding Llama 3 support in transformers and tokenizers
- [Nathan Sarrazin](https://huggingface.co/nsarrazin), [Victor Mustar](https://huggingface.co/victor), and Kevin Cathaly for making Llama 3 available in Hugging Chat.
- [Yuvraj Sharma](https://huggingface.co/ysharma) for the Gradio demo.
- [Xenova](https://huggingface.co/Xenova) and [Vaibhav Srivastav](https://huggingface.co/reach-vb) for debugging and experimentation with quantization and prompt templates.
- [Brigitte Tousignant](https://huggingface.co/BrigitteTousi), [Florent Daudens](https://huggingface.co/fdaudens), [Morgan Funtowicz](https://huggingface.co/mfuntowicz), and [Simon Brandeis](https://huggingface.co/sbrandeis) for different items during the launch!
- Thank you to the whole Meta team, including [Samuel Selvan](https://huggingface.co/samuelselvanmeta), Eleonora Presani, Hamid Shojanazeri, Azadeh Yazdan, Aiman Farooq, Ruan Silva, Ashley Gabriel, Eissa Jamil, Binh Tang, Matthias Reso, Lovish Madaan, Joe Spisak, and Sergey Edunov. Thank you to the Meta Team for releasing Llama 3 and making it available to the open-source AI community!
blog/llama3.md/0
{ "file_path": "blog/llama3.md", "repo_id": "blog", "token_count": 5435 }
38
--- title: "3D Asset Generation: AI for Game Development #3" thumbnail: /blog/assets/124_ml-for-games/thumbnail3.png authors: - user: dylanebert --- # 3D Asset Generation: AI for Game Development #3 **Welcome to AI for Game Development!** In this series, we'll be using AI tools to create a fully functional farming game in just 5 days. By the end of this series, you will have learned how you can incorporate a variety of AI tools into your game development workflow. I will show you how you can use AI tools for: 1. Art Style 2. Game Design 3. 3D Assets 4. 2D Assets 5. Story Want the quick video version? You can watch it [here](https://www.tiktok.com/@individualkex/video/7190364745495678254). Otherwise, if you want the technical details, keep reading! **Note:** This tutorial is intended for readers who are familiar with Unity development and C#. If you're new to these technologies, check out the [Unity for Beginners](https://www.tiktok.com/@individualkex/video/7086863567412038954) series before continuing. ## Day 3: 3D Assets In [Part 2](https://huggingface.co/blog/ml-for-games-2) of this tutorial series, we used **AI for Game Design**. More specifically, we used ChatGPT to brainstorm the design for our game. In this part, we'll talk about how you can use AI to generate 3D Assets. The short answer is: you can't. That's because text-to-3D isn't at the point it can be practically applied to game development, *yet*. However, that's changing very quickly. Keep reading to learn about [The Current State of Text-to-3D](#the-current-state-of-text-to-3d), [Why It Isn't Useful (yet)](#why-it-isnt-useful-yet), and [The Future of Text-to-3D](#the-future-of-text-to-3d). ### The Current State of Text-to-3D As discussed in [Part 1](https://huggingface.co/blog/ml-for-games-1), text-to-image tools such as Stable Diffusion are incredibly useful in the game development workflow. However, what about text-to-3D, or generating 3D models from text descriptions? There have been many very recent developments in this area: - [DreamFusion](https://dreamfusion3d.github.io/) uses 2D diffusion to generate 3D assets. - [CLIPMatrix](https://arxiv.org/abs/2109.12922) and [CLIP-Mesh-SMPLX](https://github.com/NasirKhalid24/CLIP-Mesh-SMPLX) generate textured meshes directly. - [CLIP-Forge](https://github.com/autodeskailab/clip-forge) uses language to generate voxel-based models. - [CLIP-NeRF](https://github.com/cassiePython/CLIPNeRF) drives NeRFs with text and images. - [Point-E](https://huggingface.co/spaces/openai/point-e) and [Pulsar+CLIP](https://colab.research.google.com/drive/1IvV3HGoNjRoyAKIX-aqSWa-t70PW3nPs) use language to generate 3D point clouds. - [Dream Textures](https://github.com/carson-katri/dream-textures/releases/tag/0.0.9) uses text-to-image to texture scenes in Blender automatically. Many of these approaches, excluding CLIPMatrix and CLIP-Mesh-SMPLX, are based on [view synthesis](https://en.wikipedia.org/wiki/View_synthesis), or generating novel views of a subject, as opposed to conventional 3D rendering. This is the idea behind [NeRFs](https://developer.nvidia.com/blog/getting-started-with-nvidia-instant-nerfs/) or Neural Radiance Fields, which use neural networks for view synthesis. <figure class="image text-center"> <img src="https://developer-blogs.nvidia.com/wp-content/uploads/2022/05/Excavator_NeRF.gif" alt="NeRF"> <figcaption>View synthesis using NeRFs.</figcaption> </figure> What does all of this mean if you're a game developer? Currently, nothing. 
This technology hasn't reached the point that it's useful in game development *yet*. Let's talk about why.

### Why It Isn't Useful (yet)

**Note:** This section is intended for readers who are familiar with conventional 3D rendering techniques, such as [meshes](https://en.wikipedia.org/wiki/Polygon_mesh), [UV mapping](https://en.wikipedia.org/wiki/UV_mapping) and [photogrammetry](https://en.wikipedia.org/wiki/Photogrammetry).

While view synthesis is impressive, the world of 3D runs on meshes, which are not the same as NeRFs. There is, however, [ongoing work on converting NeRFs to meshes](https://github.com/NVlabs/instant-ngp). In practice, this is reminiscent of [photogrammetry](https://en.wikipedia.org/wiki/Photogrammetry), where multiple photos of real-world objects are combined to author 3D assets.

<figure class="image text-center">
  <img src="https://github.com/NVlabs/instant-ngp/raw/master/docs/assets_readme/testbed.png" alt="NeRF-to-mesh">
  <figcaption>NVlabs instant-ngp, which supports NeRF-to-mesh conversion.</figcaption>
</figure>

The practical use of assets generated using the text-to-NeRF-to-mesh pipeline is limited in a similar way to assets produced using photogrammetry. That is, the resulting mesh is not immediately game-ready, and requires significant work and expertise to become a game-ready asset. In this sense, NeRF-to-mesh may be a useful tool as-is, but doesn't yet reach the transformative potential of text-to-3D.

Since NeRF-to-mesh, like photogrammetry, is currently most suited to creating ultra-high-fidelity assets with significant manual post-processing, it doesn't really make sense for creating a farming game in 5 days. So, I decided to just use cubes of different colors to represent the crops in the game.

<figure class="image text-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/124_ml-for-games/cubes.png" alt="Cubes of different colors representing crops">
</figure>

Things are changing rapidly in this area, though, and there may be a viable solution in the near future. Next, I'll talk about some of the directions text-to-3D may be going.

### The Future of Text-to-3D

While text-to-3D has come a long way recently, there is still a significant gap between where we are now and what could have an impact along the lines of text-to-image. I can only speculate on how this gap will be closed. There are two possible directions that are most apparent:

1. Improvements in NeRF-to-mesh and mesh generation. As we've seen, current generation models are similar to photogrammetry in that they require a lot of work to produce game-ready assets. While this is useful in some scenarios, like creating realistic high-fidelity assets, it's still more time-consuming than making low-poly assets from scratch, especially if you're like me and use an ultra-low-poly art style.
2. New rendering techniques that allow NeRFs to be rendered directly in-engine. While there have been no official announcements, one could speculate that [NVIDIA](https://www.nvidia.com/en-us/omniverse/) and [Google](https://dreamfusion3d.github.io/), among others, may be working on this.

Of course, only time will tell. If you want to keep up with advancements as they come, feel free to follow me on [Twitter](https://twitter.com/dylan_ebert_). If there are new developments I've missed, feel free to reach out!

Click [here](https://huggingface.co/blog/ml-for-games-4) to read Part 4, where we use **AI for 2D Assets**.
#### Attribution Thanks to Poli [@multimodalart](https://huggingface.co/multimodalart) for providing info on the latest open source text-to-3D.
blog/ml-for-games-3.md/0
{ "file_path": "blog/ml-for-games-3.md", "repo_id": "blog", "token_count": 2090 }
39
<jupyter_start><jupyter_text>Training Decision Transformers with 🤗 transformers In this tutorial, **you’ll learn to train your first Offline Decision Transformer model from scratch to make a half-cheetah run.** 🏃❓ If you have questions, please post them on study-group discord channel 👉 https://discord.gg/aYka4Yhff9🎮 Environments:- [Half Cheetah](https://www.gymlibrary.dev/environments/mujoco/half_cheetah/)⬇️ Here's what you'll achieve at the end of this tutorial. ⬇️<jupyter_code>%%html <video controls autoplay><source src="https://huggingface.co/edbeeching/decision-transformer-gym-halfcheetah-expert/resolve/main/replay.mp4" type="video/mp4"></video><jupyter_output><empty_output><jupyter_text>Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 [Read the tutorial](https://huggingface.co/blog/train-decision-transformers) Step 0: Set the GPU 💪- To **faster the agent's training, we'll use a GPU** to do that go to `Runtime > Change Runtime type`![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAzIAAAKTCAYAAADCAj3SAAAgAElEQVR4nOzde1wU5f7A8Q8X2RBEpTBK8YJ4Y/WXoElrXlBSvBSoIWaEFzTTMLPwkJFFnuNBM8nUUDPUksxzkFLQVPKgm5akJnhU0AIzRU4keQk1Lgr7+2Nh2YVFAbn7fb9evHRnnpl5npnZ2ec78zzPmGg0Gg1NRF7WaZJP/0YugOUjOPfsgb11fecK8q5dIw/ggVa0eqCGV37jGnnWrajp1VZPHtfOJJP0Wy5gySO9XOjxUMPIWTl/7GJ2r9FEZE3jy+xIxj1U+UXz/jhN8slqnGd51zh3JolzV7XLufbuUfPnQz2r7newVr8jVXE7j3OnDmmPEdC6kys9Olb++5X3azKHShfGtXunCsqTxRe+j+C3FcZ98RtfTrTXLq93blk+4oyLkz0PmN9jme5V3jVOH0/iN+1B5ZEeLvSwb/gnbrW/p1XZRnV/c+7xPKtV9XW8b1zj2m3AvBWt6up3u7rH4cY5ko+e46p2Ifr37FT/39M6ortW8wCtdBe30uvZokQNbz1Rb5njmjZzPGDdqvSY/PBPTFQLYPxmfot+HvuKFq+Da4aRrXKtONMPtKr8NeDatWu0bt2aDtYmnJlghVkXW5q5d9DOLLjNni9usOqPngDkXLvKxfPn+D3zIrmzw2H486UruvEnBI2GsydYvnw5c+fOrX5JruXxQCV/wLO2PMsjz39leEz0rz1V2P8mTSmQEaKyzm0cjWPALuzfTOBc2NCGUYEQ9xHjgYwQQjQseWSlX6OVk33Fv5N5+5hn6UE4Q4lMS2CaUx1m74/TnKMHne5wM/LcJx44ztgHf0sgd2nT+L1Xq9UMGTIErw5m/PspS8y62GLW1ZbC89co/Pkqf964zTPfW3Mq6wa5f900XDh4rTaY0QtiJk+ezKefflpn+TcayFSTaY3lSojGIm8f60N2AS7M820aFzUhhBCiRp1Zz+guljzSZRzhP+RVmCx5zQLCAUY+z9A6C2Ly2BfizCN2zvR//QvOVZTsj12Ev7MPsOetkf2b3O/9/9maAVCYdoWCr9MpPPUHB88X4Pl1Lkd/+Z3cv27y2GOPsXz5cpYvX65daOlM+GpNvQUxNSpvH/fJw1AhSl3b/QX/zALGz8Ovd33n5t5ccehS31kQ1TQCJyC9vrMhhBDGdR/NNJU9u9ITWTD2WfhiPW8N0bt3npfFvuXT8AtJBOwJes2PTnWWuQfoP86PTosXkBjlh18rWP/35+nRqjTFteNfsGCWHxFZwMhFTBvS1MIYOH+jiIO/FXKtQMOJy0XsOH+b/14pAmDw4MF8+umndOzYUZe+VatWTJ06FVa/AcBjjz3WeIMYtPU5CWTEfSaZ9X9fD9jz1qxx9/Q4UwghhGi67Bm3+isWXRzHgv27WDD0ERbY92Bor0fg9m+k7j9NVnG6UR/sYtGwug0UHuj7Fpujz+Hhu57EVX44r/Kj0xND6dQCcs/tI7HkPtETgXy9elodBll1JyrtNlFptw2mdejQgXfffZcpU6aUS18yberUqTz22GOo1eo6yWft0NbnJJAR95cf9rHPbihDx4/j2SZ4d0Y0LkOHDaVTAxiQRAghjLJW8dY3yajWLCJ8ZQS70k+zL+t08Ux7ejzjx4KwBTzfs9UdV1NbOo2PJOnkKCLCFvDRltOc+2FfaTMzJxXTZixi0WtDsW9itd1WrVoxePBg3f9799Y2LxkzZozu/xWZMmUKrVq1wt3dnVat6ue4Yd2JocOGQrt72H5xfU46+wvRiEnTssbNNiOtvrMghBCVVjpqGfU/ymRZeqOWGY6qJpqyJhajCiGEEEKI2lCVIYLrXEMLrESdkEBGiCZG/y6/PHCtXyYmJrr/y9MzIYQQomZJICNEE2MseJGApm6VBDCy34UQQojaI4GMEE2QfgVaKtN1T6PRGDyN0f+/EEIIIWqGBDJCNDEajUb3V/JZ/19Ru0qClrL/CiGEEKJmSSAjRBNTEsScO1fhu45FHerUqSm+vUAIIYSofxLICNHEFBUVUVSkfbNvly7Swbw+paWlUVhYiKmpaX1nRQghhGhyJJARookpKiqisLCwvrMhismxEEIIIWqHBDJCNDESyDQshYWF0k9GCCGEqAXS3kGIJka/aZmof3I8hBBCiNohgYwQTYw8kWlYCgsLJZARQgghaoEEMkI0MfpDL4v6J8dDCCGEqB0SyAjRxGg0GnkC0IAUFRVJICOEEELUAglkhBBCCCGEEI2OBDJCNEHyBKDhkGMhhBBC1A4JZIRoYqTi3PDIMRFCCCFqXo0GMvk5+WWmJBHm6IhjQDTZNbmhhiIrmgBHRxyXJBlMLr8fGon8HPJlsCvRCDTa71ij08Sv4Xd0P5e9Cm7nkltQ35kQQtyv7hDIZBMd4Iij453/AmK0l/ic+Pn07t2D4evS6yjr1VW5cpUNTiqrvvdDxmYfHB0dGbwq5c4Jz0fh4+iI46AIUgqBnHjm9+1NjxGRNPQjWHklxzqA6Kz6zksD92NYBd+F3gz3nkVYTBLZDS
TIre/vWG1KWlKJa1PJ3/1awS45V+94jb5PvvvX0znwrwiWLQwhODiY4OAQQpdFsP1wJtfrZLyP82z/Zyih76zj0PW62J4QQhgyv3sSB9wne6G0MT63TUcFAAobGxQoaGfXsgazV5vuXC6c7aq1VuP7IZvoADfmq6cT80sIrtVac+U4jJqE59tJxG9Vk/KyEqWZ8XTp6miSANeXvLRpFDbYWICiYxsayxFs6rJjAnALVjM9+hdC+tbRRl28CBzgUPr5Sjrxu+KJDI4n6tNANkcH4dq8brJSUfkb37Wm8uxcAgl8RX9KDinbolBfNHK9auOEoo7zJxqQrARWfBhPZhGYt2hLd9c2WBZc4XzaeQ59uYJDiZ7Mm+NBm1ptQG6OpSVAC2zlZBRC1INKBDKdGfFSEL72d06lUIVw/JeQmslVnahcuaqq3vdDa3dGjoL4XdGoUwNR9jKWKJ3Er1IAV7xLKq0KFSHHfqExHUFRCx6fRNBrhqF20LvZqN/1IWBzBGHbfInxc6hg4bpR79+xWuTgGUSQp/6UbKJPRqG+WDvXK9FYZbJzYzyZRZb0fH4ek3q3KJ1VdJ3kL5ax5UQ8Eds6sPBZp1rMR1s8g5fiefeEQghRK6Szf5Njg6ePP5BBtLqC5mWp8USeBEZNwrtDXeZNNEpmdri/FEh/ICkh8f5sziREQ3LpDGeuAg/1Z4R+EANg2gKX58fT0xRyjx7ilLxSSgjRhNVcIFPcbrmkz8xdZSUSGeSDWy9te+/ew2YRtiOFnAbSDr+s7MRI5j7bW9s2u89wZi2JIz3HSEKD/VDSTtuN+WqASG2/FMcwqtcDp3IUqhH4W0HGuliSjOzPlG+jyQA8Pd0pbalyh46tOSnELZnF8D7aY9Wjvw9z1yXq9ZlIJ8rbEcde4eXKlf7ZMzg6DibiZJkZZ6N4xtGRHtXsi3QvsmMCcHR0JOzHfNJ3hDFrmN5xXZVovC9ImfO1/D7Qc9f9Bfr7O/1kZHEeehCeXJo/t2A1AJG+jsX5rYWdUVm2bWhXZlJJf47y+TLWP6G0vBnZ+vuyB26+oUT/XNp5/67lN3atKR54Y/CaFO131deNHiV9fGZFkJgNFOaQEhOKT/8eht/jv4yUt1LHsAGpyvlZnfRlFeaQov/dcezN8FlhxKUauyg2PNnflT1HKpn34vMsICbD8Dzr5YbP29Gk1HXxb90i19h00w70c3fBpffDmN/Um56byZFtZfrUrNzEgV/11vLnAVYEBxO88gDlur0UnSEmNJjg0BjOFAFcImFZMMHBmzilS3SKTcHBBC9L4NLVZLavXkLI/GCCg4MJWbyBhF+N5fg2V5JiWFGSr5BQVnx+gPNXj7AhOJjgz08ZWUYIISrVtKzm5f8Yjs/UCFIsXPGaEIiDVT4Z30UT+Wo86pQYdsx3bVBtvzNiAhgerCbfyg53v0CUtjmkbH2D4Zu5Sz5tUL2ymc0B2ex/fy6RJz0J2TQJJXbU5sN+FCq8/RRErYtiT3IIrgb9K1JQb8kA/JkwvKIOQnouxjHLey7xBXa4jwnE0xZyUqOJXuJH/NElfLPWFwczJ1xHOMD7alLOBuHauWThDBLjtE+For9LIbCXUrfa7BP7SUHB9Kdqs8fQnanffIbIK+A5yp/AwRkkbosjfrkfZ2/HsOM1vXMwNZJnJoSRgtLgfI0z2AfFaSu1v/QykR7Jy8+nk27hhLKXggcAmydms3nTi2Srw5i7IQXPBZuZ1BXsOlNv8lMPkQhg0/LevpvpkUwbmsGfPTzxDlCRnxpNdEIU85/N4IG9G/Cyv7fyZ6z2YRA2qMb4Ml2VQ/quWOLjw/FL/5MQDzVhm//Efcx0Am1zSNkaTfy6uaizFBz+0LM0qK/qMaxnuusp2vwq9fMbV75fU1XTl5dDfMggZm3NwWaAP4GjbOBKCtHbI5kbr+ZU9A5C+jakK7i+fJKW++C3KgWFixf+rziguJlB4rZI5j5d+byrFw5nEDYoh3szXZWvPc82z+eZH9KJiQ2p/X5kbXrh0iae+EsH2PKvtszwccHW4Ne8Bd1HTKS7/qTcU2xauolTN81p0bE7Lt2aQcEV0s+cYufqs2ROeZuJzubQ0oXHO+4k89dkkv8cxCD9rmhppziZC5Zuvel+t9ug1w8Q8V4ut1p2oHvvDtz+30nOZJ0hfu06eP1VPNqUJr30nwiWfZMJpua06e5C2+a5ZKbsJELiFyHEXdR9IJOfRPjrEaQ8HsI3H03HqeSC/1oQExYNx2/da0SO/pZAo307apKa+f0dmW9s1owYfplfXMG+Gk/4QjX5Vu4s2b0B35Lb0nOCSHzfB787jpykwMFFhQPZZGwAcMB1gKpWO/uXcB09G4d14UT9J4mQvnpbPKkm+iIoJo9Addff62yi35lLvJU/G/YsxF03/kEQ0/8VwOCQ+YTtGsGaZ2xQPu6Jgkj2J2fj37k4YVYi+5NBYaUgY4ualFlKtKFMPilJasCf/rV+nCuWjjsxB/UqHa+PIdQtgKhVe0ia44rKTJtX9ZYwUm4qCdmzg+ldi9OWnK8bQolM9GbhAAVV2V86FzNoF/Yt3zyn3+/EFVU7yM7SrsDh/1So6qqzf1mF+WQkRxH2UiQZOBE0Q6/CXx0XM2i39Bu+8SkpbxD+n/kwfKGaaHUGXs85oGh3L+X35L19H+JVsu/nzCYuaBBz4yIJy5/O5u9DUJUUYI4/Ub7DCY37N3vmexb3P6nGMaxPJddTylyfivM7PCQCv/f7czxUpQ1Aq5remLOxfLQ1B8Zv4PB77rp0QS9FEzBoPpGLo/H/0p9a70m1zgfHdVVbJP/HcF5blYJqwTesCdAbLOH1CYSN8CPy9Ui89wdWOEiKTmtf1nypd37ofg8ieW29N9++orzj4veuDR4vjOHsx9tJT9rCkuNbadO1P/0GuvB4p7ZYGvllP//tHs6bt6Dnc/OY5GpZOuPCTpZ8dIBk9RGedu5PC1rQ74nubP/1DMknrjNoYGnTtTMnTpKLLYNUlbgVl5uL7fB5vPpUScQykfM7lhBxMJOjxy/hMbx4+p+H2PqfTDBti+fcV/Eo6Qem6+tj9JmTEEIAlWpapq3w32no5arISdhE5EXwn+RfGsQAoEA1cTpKMtiTVBfDqjrgPjmQwFeM/LmUjliWk7ibuJvgOn+h3o8+YGaDasp03Osgp9Xi7I5vO8jfHEui3is3UtTRZKDA16MST71So4lQg3Kav16FTsth7CT8gfgjSeQDuGibs6m/S6KkdUX2d3tQ4897Yb5wcQ9JZ3W54Nh2wKs/rvV443b638vcOW2uYsgYgHQydKd2PvlGhxVVoHppNZs3bcC7Y/GkquyvEqqFLHyufjvPG1jnY/g979KDwb5hxNOfoM2bCXS+x/WrFrLQx7C8TgO8UQKHfq2B3jd+k0qDGND27xmu7YqsnOZbGsQAmDnh+pQDoCb9YvG06hzDelRyPS13fQIcnltIiAvkf/Zv1DnVS29Ufg5GZ7fzZmH0Zja/pqqbkQ9dvIxfv18JJPAVf9zLtoUkB/XnkWTgzyS/MiO+NVfhO02pv
U79Woltj/I2PD/MbFC9FoI/kLFVzV0Gv68Z9v2Z8dZ8Jg3ujm2z21w6c4Cdn6wgdEEIyz4/QHqZ61aHEfN4+623DYMYgPYdeRTgr1xdMzXzni50N4XM5GS95mXpnDqVC6174vJoJfL3kAcTn2pjMKnDYz1pAVy5dEk37fqpZM4XQYsnx5QGMaDt6zN8EIZrEEIIQ/c0/HLJ0MtVkX4iDoCogB5EVZTot7poaFy5UYBK8uvarQFVNivDTInnNCXhC6PZczQE1QAFFKag3poB7YLwvfvjGLJTj5EBsHA4jgsrSJR5iRzAzkxJ/zEQufkQSe954q7IIVGtBq81uA9X4E8U0d+l49/ZCc6moL4J7gNc7+3u/r1qVnaCAhurstNscH9hOk5xkYQ968ahMS8ywl2FSqXEwU6JSq8yU6X9VbpJHrjXctQkg+GX80nfFUn8WSdCtnxe+jTqXhgrr5WC6g12Xjk2bbRVITur8tXrNnadQXvUgGoew3qkvT45MMTF2PXJAZWXEpLjOPbzh3j2rXp6o7p5EejxEfO3BuCW7oXvuKGMeEKFsqMdDn1Vtf8kpoSREfZKlYz2pj8tnWNxAFEE9Kjw14eMq9XMj0KJ0h1QH+NsNijr4gQxt6Xn6AB6jr5N7v9+5WjyAY78cIZLJ3ayLjUZzzmvGgYH1zM58p/tHDiRyaWbtyter4UL/Xpu4cwJveZlacc5mQttBvajbWXyZmqkevGAJZZg0Pfm/LnzAHToJCPPCCGqrsaGX66a2nmHS+1xx6nc3b2Gz8ndF9eFoUQnJBEyQIUiOZaPLoLDa+53bzqhx3VsIKqKyq97l4UCV5UXbI7mWOpC3Dsnsm8XeH2kwkahYIgfRMUlkjHZiQeS92ubtgxoaMfZOEXfEGL29id69Ud8sj0M9WbtdBsXf0L+EYSvs+GJXLn91UCVqRzmD4ThvpGER6nx/4d7w857DWpcx7AzdrbG57S0MvYdq2r6Mswc8F17AKdtn/DJx9FEvh1HJICVHe4zPuS9l1XYNaA+ROW0c8d/rLLCmyg9q31ZssOhuC6eV+eDQphj+agTgx51YtDI66R/vYF1BzOJ37iT7m8+rQ08Lh1gxYc7ySxqQYfeHvh0ty2+l3OeQ18c4nyZNfZ8vB+WJ47ompelnzhJLm3xeLw2npHY8vBDtbBaIUSTVy+d/QH6jA+692YqdUGhABLJ+A1obO9w6DACX/dQ5sfsIWm+CsV/oshHyfRRVWu/3c7jRYJG3f3ZiY1rf9yJQ30inRd/200cnnxY3JZHG+TsJzHLlzb/VUOvhbg2ov1p09md6eHuTC/MJyc7g5TvoglfGMn8pxPJ3vmNwblc2f3VGCj6vkigeyTzN4cROdG9cXxna0DjOoZnyb6C0evTpeyz5SdWOb0RZna4+oSwxicE/soh46ya+NUfEbbcj0Fn1xgOntDg9MH3tUBqvhdLNhnF0cADtRzIZSZu4cB56DBgIv3LBtymLXB6ZgyDTkRw4OpZzlyGtg/CqW92knnbkn7T3sanm/4CzTj5xaHyG+nWm16WRziSnMz1gY9y/EQutHPBpYbbDZqbA1zh9z9ofL+xQoh6V+fvkXFwcgcyiE6ok1bE98ypmyeQT2Jqxl3TNjx2jBjrCTej2POdmj2b88HFF/dKjn5l11F75zlul9p4m/iy7Icwwh1Svk0kNjEO3Iegaq2dZTNgJF6o2aOO5dB2cBjhWrsjt9WUnCSilocTvqu435aZAht7J1Q+IWxe5Q+k81G8dgjpKu+vRsEO75en40A6H62LNyyXomE9l6gJje0YOv2fF5DB/mRj16cMkv6TAXjRp2v10huTER9O+PIokkqaYDW3waGXF9PXrGZhL8iP+zfqBvmyIQec3IGL0ajLDgdfE/JTSFED7frQuZYfNt/+I53kpGSOnjPagQ+wpFkzgFtQCHCJ37MAOtO9W5mkRbcqWIcTj/dtAReTST5+htO50F3VnxYVpK6utsWPsf53PrOG1yyEuB/UeSBj5zEBLyvIWB5KxMkyXWZzEgmfG0lSA6pB2KhG4mUFSUtCidZvb12YQ+KnkairtLZ8cuq4l7DN4DF4AVFzZxF1E9wnjqh8G3YXb2Z3BnaF886OMhWfwgyiQ+YTZ9AewQ7XwUpQhxK6GdxHDSntR2CjYqQXqFdHEH9TgefjtT2qTw2xUpCzLYKIN8IMjz9AofZgOtkW33uu8v6qnPwb9du1XPtUBvLjwojSqwBqg3yITzS8KZH/cyx7qvbFuKM6LX8tHcPaYuMxAX9j1ycgIyaUsGRQTJ6Au0310hvTsiCdiFWhzP+4zKAHhRR/dsCmQT6OsWOEjxcKMgh/N4KUMu8PyvkunLnrkir3LrOEPSQa/E7lk7Q8jCjAaaJ7LTztMdRhkAcdTCHz6w3sNPJeluvH93DoD6B1d7q3AbDEsjnAWZKO66Uvuk7yF9upaJTjDn1csCWThG1HuW7aHZfHar4RR4ueLnQwhSsHY0jI0ptRdJ3kbw5wqdwSt8m9fp3csl18CnK5Xq7fTwVphRBNRt03LWvtyXsbp5PqG0m4txv7x/qiaqcofg+BmmwLT7rkTMe11n8Iz7Ln43AyKuyn40uQpwO09iQo1J34YDXzR7qxZ4xv8XtkolFf175H5u7VLDucuipAHcXcF8HfxYH+L0w36Chea2zcmTBZQdxn+eTjyRiPKmzUTEngxiUcGzmfuFeHkxjnq+0PclP7zpWkAiWB4/KhQ+mdeafHR+BAChm4MuRx/W3ZoBruBXFxZFhNZ4RLzRWxVpkpmf7BdGJ9I/WOP+RfTCR6WxK09mW2l5MubVX3151onw6oiQqaBi+44PD4JKbXS78iO3xfCyJCHU748mh8N/hiB9i4j8HfKo6o5T64ndCWNefneGLjM8i3Am7ebb132Wp9lL+Gj2GtU6gI2RhI0tQIg/MzJzWa6IRscA5k89/0hlKuanojbEaFsCROzfx1Prgd9cJ3gAMKiq+JWQqU831xbyC7pyybUe+xeUYqPuvCeWbgfrzGqnCwKi2/YlQX/qQSg5BkRRHwZCKeE9y1yydHEfVdDnSezpLJdXCTpmV/Al44z7LPkzmwOpRDLdri1KUNluRyKS2dzOu3tcMZTy3uH0ML+rm7EP9pMqe++AehB5zobn2dM+mZ5BYVr/NSJpfAcJSwR/vxeJsDxF/KBWcXelnUTlnGP3WUZd9kEv9hCMldexW/R+YMl4w8LLqeuIF/bEuHzmN4+6WSJ0Tn2b44gkM3Len/8kLGdLxTWiFEU1IvfWQUfUP45tAQIt8P55NvIkm6CQp7Je5+HzL75TsMAlCjMlB/FlHxE5UZQ7SBDODgs4EDbSP557KPiNscgbq1E57j3+ObFxR8NGgWcZXYmuucGELOBhCeEEVEihc9X6ipctyNAlcPXxSfRZHvNQb31lVcvJ0vG75XErf6Iz7aGkVEAtDaif7uIWz+23RUZds0d3PHt1044a29UZUZhMZGNRRP4ogf079Kgw3UN0XfEHbs70/Uyo/4ZHsE6pLzdYaR87Wq++tO+gYRMz+d
gFVqoladwuujSTVcsiro5U/QqHDm7grlo++K35vT3J2FezfTZmEon8RHEZGgwK6vJ7M3b0b5rRt+VXzHRzn1Vf6aPIZ1QNE3iB17+2uvp7rz0xXP+R/y1rTyHe+rmr6csp39V+UANjgN8CRk6Vv1FGxXlgLX+d9weEAk/1z5CfEbkshHgZ2zO/4rZjN7lBKbylyb/DZwYEAS7yzZRGRqNvlWdrj6LWHh33xR1vbLMItZ9pzI228N0o1CdiZJ2zTL3KoNHVz74Tl6EE56NXdz54nMe6kNW6MTOHPxDMmm5ti278+Yif258tky4v93id8vQU+DSKYN/dzaEr8jk559XWqtwtDmqVeZ/1AMUbFJZJ5J5pK5JW2dn2aGKpftHycYPJWxbGlLC1PA1pbSgaQtsW1tDvm22Da/W1ohRFNiotFoNPWdCSFE9Vxx6FJumunJoxQUFPDnn3/SpUv5+aLupKWl0bJlSywsLCjq9Xi5+bYZafWQK1FtWdEE9J+PWv+lyaL2nNpE8KZTWLrNYOGzjaJXpRCijtV5HxkhhBBCiBKX/rOOLcfLDFxQdImE/5wCLOnVU4IYIYRx9Tb8shBCCCGqLjg4uL6zUCOWLl0KtzM5mforyRf/wcl93en1qCVQ3EcmHyz/bzyji0dau9dyDxkyhJEjR957xoUQDYYEMkIIIYSoH+Zt8Zj9Jt2PJrD9YDInk3K5DZi37kD/p8fg6dZW+rcIISokfWSEaMSkj0zDJn1khBBCiNojfWSEEEIIIYQQjY4EMkIIIYQQQohGRwIZIYQQQgghRKMjgYwQQgghhBCi0ZFRy4RoYkxMTHT/l7E86pdGo7njMZDjI4QQTZv+b7KoeRLICNHElFSO9SvJUmGuH8aOhbH5QgghmpaSAEb/Oi9BTc2TQEaIJqjkScDdngiI2nW3YyDHRgghmiaNRmMQuJiYmJSbJu6dBDJCNDFFvR6nGWAHXKvvzNzn2txlfmFhYZ3kQwghRN0pCVaM/SvBTM2SQEYIIerJ+fPn6zsLQgghalmnTp10/5dgpmZJICOEEPWkS5cu9Z0FIYQQtSgtLY3CwkJMTU0xNdUOFizBTM2RQEYIIYQQQohaUtKMuGyfGXHv5D0yQgghhBBC1JLCwkKKioooKiqSQV5qmMnPP/8se1SIRkp/eF+NRkNhYSGFhYXcvn2bFi1aSNMlIYQQoh6lpaVx/fp1zM3NMTMzw8zMDBMTk3IDAYjqMZeKjhCNV0kgU3Knp7CwkFu3bnHr1i0uX75cz7kTQgghRMeOHWnWrBnNmjXDzMysXH8ZUX3StEyIRuHsKjcAACAASURBVK7siy/l3TFCCCFEw2Hst1l+p2uGBDJCNBH6F8qioqL6zo4QQgghQNc3Rm401jwJZIQQQgghhBCNjgQyQjRBcsdHCCGEaBjkN7n2SCAjRBMjF0whhBCiYZHf5tohgYwQQgghhBCi0ZFARgghhBBCCNHoSCAjhBBCCCGEaHQkkBFCCCGEEEI0OhLICCGEEEIIIRodCWSEEEIIIYQQjY4EMkIIIYQQQohGRwIZIYQQQgghRKMjgYwQQgghhBCi0ZFARgghhBBCCNHoSCAjhBBCCCGEaHQaWSCTzEoPDzxeiSWrvrMihBBCCCGEqDfm1V0wK3EDK6N2k/zTFQoAi5aPoBzgx8xpI3FqWYM5FBXK2vYKfh+lVjK1N8sS5uBSqzkSQgghhBCiblT9iUxhFrHBI/FbsJnDF0A51BvvsSNxsb1OytfLeOm5V4jNrIWcNiHff/8933///T2vx7rTILzHehv8ubXXzmuv8i4zT8mD97zFimQR+4oHHh4rSa61bQjR8G3duhVHR0cOHz6sm7Z06VIGDhxIenp6PeZMCCEat7i4OAICAjh27Fh9Z0U0IFV8IlNA8ppXWHmsANthIaz6mwf2ZiXz5nHjyEqmvhnLyqC1dNk8E2ezO63r/hUbGwvAk08+eU/rse49njm9Daclr4jl8AVwGTuHOX3uafVC1KnDhw8zceLEctPt7Oxwd3fH398fpVKJiYlJPeROCCHEnRQVFXH48GF27tzJ77//TlFREdbW1vTr1w8vLy9sbGx0aa9evUpYWBi2trbMnTsXS0vLWs9fXFwc27dvv2OawMBA+vTpoytLXFwcly5dwszMjG7dujFhwgTatWtX63kVlVe1QOZCLCu3XQG78Sw0CGK0rPvNYaFPAq/EbGX7dwE4D7aowawKIe4HHh4eKJVK3efLly+zc+dO4uLiWLx4MWPGjKnH3AkhhCgrLy+PjRs3cvToUWxsbOjbty/m5uZcuHCB/fv3c/z4cWbPnk3Hjh1rLQ+5ubnk5eVhZWWFhUXF9c/u3btja2trdN6DDz6IRqPh66+/Zvv27dja2uLm5kZeXh6pqaksXbqUOXPm4OTkVFvFEFVUpUAm69gBLgDOE8ZU+LTFuc9AiNnNwZMpMFivR0bhDdL3rmXt+gSSrxQAFtj29iBgxkxGdrMut54bP+1m7eq1JJy6QYGFNe37eDPzVWW5dDp/prN7/Vo27EvmSi5YtGyPi9dM5vi7lQu47nuV2Fc3jq1kanAsV5xnsvnD8aX7sDCLrXP9WJtqi/fSJXT4dAYrdd10YpnnEYv0xxH3Yvjw4YwfP95g2qRJk5g5cyYbN25EpVLx8MMP11PuhBBC6NNoNMTHx3P06FFUKhWTJ0/WBRIajYbExEQ+/fRTvvrqK2bPnn3HIONe7N27l+3bt+ueqlTEw8PjjvMvXLjA3r17cXR05LXXXqN58+YApKens3LlSrZt28arr75aa+UQVVOlPjK/XUgFLOjSyb7iRP3mkZCQwO7Z+kFMFrFvjuel93eTYu3CyLHeeI92wfqn3Sx7eTzz4gzHILtxbCVTX17G7lMF2KtG4j16II9kbiVk0rvsNrbNzFjm+b3Esr1p2Bf3DRnocIXkqBCmvh5LVmFVStnEVXJfWfeZSchYW0jdwD+/Lj0+WV//kw2pYOsVQkAfB9ymhRG2aB4j2wK4ELAojLBFo+hQL4UTTZWTkxMjRozg9OnTZGRkABX3PUlPT2fgwIEsXboU0N6lmzt3LuPGjePs2bMsWrQIFxcXunbtyuTJk0lLS7vr9gsLC9m3bx9+fn507dqVHj16MG3aNE6cOIFGo6n5AgshRCORnZ3NgQMHsLe3x9fX16CCb2JiQr9+/XB2diYtLY2ffvqJxYsXExQUxOXLl0lLSyMwMJDFixeTm5vLsWPHCAgIICYmhi1btjBjxgyWLFlCfn5+nZXn/Pnz3Lhxg0GDBumCGIDOnTvj6urKr7/+ysWLF+ssP+LOqvBEJosL6QBOdLhDHGN0ybh/avvVeC1j46su6J6/TDvMypdCiF3xJlt7b2R8e6Awlc/fj+UKtngv3cicPiWp53Eh5hVeWlN2lK4sYpesJNnCg5CPQ/CwK5k+h6yYV/Bbs5JleweybITxx4j3l6rsKwtcpoXg/d08Ytds4ODQEAZykA1rUimw8yZkmvY4Wvd2w54ssr6A3ZntcVa5yZMYUeNMTExwcHDg9u3bFBZW787
EuXPnCAgIwMnJicmTJ5Odnc1XX33FvHnzWLduXYVPeQoLC1m1ahWrV69m9OjRrFq1imvXrvHvf/+byZMns3btWtzc3O6leEII0WhlZmZy7do1Bg0aRMuW5YetNTc3R6lUcvHiRXJzc/Hy8iIrK4vY2FisrKwYNWoUtra2BgGQWq0mLy8PGxsbrK3Lt9qpTVevXgUwCGJA+zvUrl07Dhw4oEsj6l+1h1+uvFS2R6WChQdzZugFMQAt3Zg524PdoQl8vieV8TOc4acD7M4Gi2FzmNnH8ORt7xPAyDXziDVY/XY2pMIjU17Qq5hr2Y/2xWPNuyQcS6FgxEDq+iHgmTNn2LBhQ7npf/zxBw899BDBwcHl5nl7e9/zIAAVquq+snRh5nxvDgbFsmyNGwWsJKHAFu+/BeBSt9cVcZ8rKCggOTmZtm3b8uCD1Rt/7+bNm7z55pv4+PhgamqKRqOha9euLFy4kNTU1AoDmczMTE6cOMGsWbOYM2cOZmbadpaDBg0iICCA2NhYXF1dadasWbXLJ4QQjVVGRgYajYa2bdtWmGbYsGEMGzZM9/nRRx9lz549uv40ZTv7W1tbs2DBAuztq3jnvAaUBDB//fVXuXktWrQAkECmAan9QCYrjZQ/gX5uuBgZlMKiz0AGkkDCyTSycIafUrgBOHfrUqnAI6s4/Y1Pp+LxaUWJrnAFqPuvQ8VKgpm6VJ19ZdF7JiFjDzJvWxhhgK3XMgL6SBQj6s61a9eIiopi27ZtvPjii3Tq1Kla6+nRowceHh6Ymmpb1JqYmNC9e3dA+32sSPv27Y3ekGjVqhXdunXjr7/+4vbt2xLICCHua61bt66xdbm5ud01iDl27BgRERHlppedVrbPjLFl9NM5OjpiaWnJgQMH6NOnjy6wuXbtGv/5z3+qWhRRy6oQyNjT3glITed8FlWPCuxtMVr9tbQ2Or1L+6ptoL3KG5eKFrFrb3zbtax79+66dvr6Sp7EGJtXF6q2ryxwmRCAy7ZlJOPMCxNc6mVfivvHG2+8wRtvvGEwzdzcnEmTJhEYGKh7IlJVZmZmuiCmqgoKCjh48CCbNm3iyJEjBu21vby8qrVOIYQQxlXmOm9hYWEw+lheXh5//fUX1tbWBs3UynbKr2jUspKn/R06dKBPnz589913hIaG0qVLF4qKikhJSdH9htRk0CbuTZWeyDzS3hlIJe1cFvSuoCZ8ZBkeb+7GYuwyww7/WVe4AeUrwX9eQb+rv0XxuZuWeQX6VL5fyyODA5gzTKrYlVG1fXWDg+tKXnSZytoVCQxc7IH0OBK1RX/45dzcXOLi4nBzcyMoKKhO3jVQVmFhIatXr+bjjz9mypQpzJw5k86dO1NYWMg//vGPOs+PEEI0RMaaYtWmXr16sWzZMt3nkvfETJ48+Z5GLTMzM2PSpEm0adOGb775hh9++AEbGxtGjx7NjRs3iI+PlxHLGpAqBTL2fQbRnlRSv07ggpcf7Y0EzKnHDgIwsFfxUMn2XVC2hNTjh0nO9WBgmXpIwcnDJAPWvbpoH/I4KbEmlfTUFAq87t6vxb59FyxI5fDeg1wZNlIq2HdQnX1149uVhO0rwHbsMkIswpj372Us2+tC2DDZ06J26A+/rNFoaN26NR988AHDhw9n1KhRdZ6fkydPsn79eubPn8+kSZN0L+TMzc2VHzMhxH3PwcEBExMT/ve//1WYZu/evcTHx/Pcc8/Rt2/fOsxd9Zibm/P000/z9NNP66ZpNBrWrVuHtbU1bdq0qcfcCX1Va2fR3ps5Y23h3AaCPjrMjTKDB904spLQmBtgN54xA0p+4J0ZNaE9FCSwcl0yNwwWSGbtRwkU0J4XRjhrp3UbxEg7KNi7krXHDFJzIWZD+eGXez+Ftx1wbC3L9hgO40zhBbb+/V12X6hSKZuuqu6rGwdZuSSBgpYjmTfFBRf/ELztCjj8wTISrhjbwA1u5NZe9sX9x8TEhLFjx/LYY4+xadMmsrOz6zwP586d4+bNm3Tv3l0XxID2Lda5uXLCCyHub23btqVVq1b897//5c8//yw3v7CwkNOnT3Pz5k1sbGzqIYc14+LFi5w+fRonJ6c67+MsKlbFzv4WuMxaxZwLU1kZF8L4b9vjMsAFe4sCso4fJPncDQosXZgTPtPghZntfRYz59hUVsbNY/xxNzz62GNRkEXyvsNcyLXA5dXF2qGXAcyceeFv3iQExxIbPJ5klQcu9pB1LIHDWZR/QmPmzMzwOaRMX8nh96fivXsgHl2soWT9hY/gPfoGI9tLs7Oq7asbHFwRRkKBBR7zZ+JmDeBCwOsjSXhzN8veT8BF18TMnvZOFpCaQFgQjHTugNsEP9zs7pQZISrn4YcfZurUqcydO5ddu3bpnop06dKFzMxM3Q8LaO+YHT9+nN9//73Gtv/oo49ibm7ODz/8wOOPP46pqSlFRUV8/fXX7N+/v16eEgkhRENhZ2eHm5sbe/bsITo62uCFmAA//vgjp06donv37nTs2NFg2aKiorrObrVcvHiRTz75hPz8fIYOHVrtvpqi5lV91DIze7wXb8Xl22jWfhFL8texFAAWtk64+Psyc6wH7csOI168jHLvWtauT2D3tgLAAtveI5k3YyYjuxkGGdZ95rBxdRfWrl5LQuJuYi2sad9nPGFLnyD5pVfYWjZPbb1Z9S9l8dvqE4g9BVja4tTbj7DZAbg1pOHK0A6xXN0hZO9ZJfdVSZMyi34hzBxcenys+81k3tAE3t23jNBtSlaN1S7gMi0cv1/eZOupBGIvuNFlQj2UTTRZAwcOZOjQoURGRvLkk0/i5OREnz596Nq1K4sWLSI5ORkbGxtOnjzJwYMHuX37do1tu1u3bqhUKiIiIjhx4gTOzs4cOXKE1NRUFAoFFy5cIC8vr1767wghRH0zMTHh6aefJisri8TERH7++We6dOmCqakpFy9e5OLFizz00EP4+/vrAhwrKyvs7e05ffo0q1evpmvXrnh6et5TPoYNG8bAgQOxsrKqiWKRnJzMsWPHdGUwNzdn4sSJutEuRcNQveGXzaxpPzSAsKEBVVrGacQ8lo2YV6nk1t1GMm/FSMqmdvsqgZnGFmjpxMjXlzHy9cpnqb7U2ntiAJdXE0h49S6JKrGvrAeHsDshxNgcBr61m4S3yk52JmBFLFU4I4SotBYtWjBp0iReeukltmzZQkhICO3bt2fZsmW8//77fP7551hZWTFy5Ei+/PJLQkNDa2zbrVq14sMPPyQiIoIvv/ySQ4cOMWTIEKKjo4mKiuLgwYNcvnxZRrERQty3mjdvzuzZszl8+DBxcXEcPnwYjUZDy5Yt8fDwYMyYMQY3eywsLHj22WdZt24dKSkpaDQahg8ffk95sLS0rNEbShkZGSQmJtKqVSvc3d0ZPXq0XOcbIBONRqOp70wIIapHo9Ho/goLC7l9+zYFBQUUFBTw559/0qVLl/rOohBCCHHfSktLo2XLllhYWGBhYYG5uTlmZmaYmJjo/kT1Ve+lCkIIIYQQQghRjySQEUIIIYQQQjQ6EsgIIYQQQgghGh0JZI
QQQgghhBCNjgQyQgghhBBCiEZHAhkhhBBCCCFEoyOBjBBCCCGEEKLRkUBGCCGEEEII0ehIICOEEEIIIYRodCSQEUIIIYQQQjQ6EsgIIYQQQgghGh0JZIQQQgghhBCNjgQyQgghhBBCiEZHAhkhhBBCCCFEoyOBjBBCCCGEEKLRkUBGCCGEEEII0ehIICOEEEIIIYRodCSQEUIIIYQQQjQ65mlpafWdByFENWk0Gt1fUVERt2/f5tatW9y6dYsHH3ywvrMnhBBC3Pd++eUXmjVrRrNmzTA3N8fU1BQTExPdn6g+c4VCUd95EEJUU9lAxszMDFNTU0xN5WGrEEII0RAoFIriQCaNdU/NYWtx8KJ6czvvj36onnPXuJmbm5vXdx5q3eWvX+fpRYkATFybyJzH6jlDQtSQsoFMybSS/wshhBCifpmZmWFubo65uRmmek9hTMzMuR/q4YZOslI1gy3Fn1QLdvLB6Oq3ICkTyFwmdu4IFn1f/PHJBez50JvG3kDFXO/utJm5OffdOVNTCjOJD53GP+IvQzdv/rF0AUMere9MNRyXd8xlxN+/v0sqBQ9268j/9RzJk0P6M7RvJ1qYVX+bxgKZoqKi+/DCKIQQQjRM5ubmxcGMGfo/+aam92MgU7P7wNzCwkLvYzPMTU3QNdczNaeZhQUWxpZsRJqZm+qiXzNzCywae4HqSf53/+Ltb66AiQn8HMfbu57lyMvyeKuE/nlWsQKu/Pwz6p9/Rv3VCv7ZvDtjX3uTwHGP8VA1Ahr9QKawsFCalAkhhBANTLNmzbCwsMDCwhwzvScypuba6feXZtp9UPzpXvdBmScy5pjp18NMih+FVXv1tej3r5jl8Q4Hiz9O2XSKea7Gk5rrCvUETo82jCcyf2yfhfsCXe75/NQ8etdrju6u0MywkmxianYf3kmomLlZNTrs/XWGbf+czK4vp/Dh2nkMrGJTWf1ApuTCaG5uruv4L4QQQoj6pW1Wpn0qo1+Tuj+bltXsPigfyBg8kWnAgUxJO8Pij6Z3aDJmblZyp1yB2QMNI5ApzROAacPdz3rMB01j2TP/YcHOP1A89jyLxvdtEPuyoTA8poNYpP6YZx8uk6gwn5w/LnL60E5iojay83Q+AAU/fcbLT59jUezHPNu28ts0FsiUNC2TQEYIIYSof6VNy8wN+siY3peBjLlh/f1eAxkzM4OWapgaPJExxdTMsC1bg2FqimFWzTCrIKOmukKZYHqHdHXJ1HBHN9z9rM+sA94fHML7g/rOSMNU7pgaO9fMmtP60a7093md/mOnMz32PebN30o6wM0DLHjpfbp8OR+X5pXbpn4gU/LZzMyMwsLCGiiREEIIIe6VqakpZmZmmJmVrbtqp99fanYfmBu2qS8Z17pk7SbaoVyrvfpaZFo8/nbxRxNTUyrqHlAa/TrR/uGK09UlU4OxwxvwfhaVVvaY3umc1C7Qip4+i4myewD/6Z9zFuCXjcyLGMmeN1yozMDo+oGMRqPRDb0sfWVqTnp6OlOnTuWZZ54hODi4RtddUFDA1q1b2bBhA+fOncPX15eFCxciw+ILIYShuLg4tm/fTmBgIH369Knv7FSJ/m+z/rtjTEzux99rU8P6+z3uAyOBjN7Hhh7I6H3U7oiKkpZWLh9o1jDKY2q4o7WV3nrLjagJRo9pJQ7qw0NCWDL7B8Z/lA7AxcglxDy/Ff8Od1+2KQUyhw8fZuLEieWm29nZ4e7ujr+/P0qlskm9PCw6Opp33nmH3r17ExgYSM+ePSWIEUI0SkVFRRw+fJidO3fy+++/U1RUhLW1Nf369cPLywsbGxtd2qtXrxIWFoatrS1z587F0tKy1vNXEgjdSUmQpNFoOH36NF9++SXnz59Ho9Fga2vL6NGjGTBgQJWbQpX+Nhuruza+3+t7U7P7oF4a5uWcVRO9Zj1xicdIydL2EbDprET1xHh8J3vj3tnGyFJJhHUez/oK1rl+Qme9ee4s/n49vvZVyFRhDunqWD7/citJ/03X5Uthr8TpMVfGP/sC3u5O2FT36dePYXSeUGHuGd9Zb577Yn5Y74udboJh2d3f+4H1PnbF+zGCyG+SyL4JCvvX+fz7QMqNeVCYT0ZyLNH/2sG3J06RcjZHO93KDqWLisHP+OP7jCsOd6s/ZUUz7ck3URd/nPbvs4T0LZvIMK+6NHr7N/FICulXAWxw6qVi8AvTeXGsK3Y1/XS1pspd6xS4Tnkdr40vE3cTIIn1cSn4v6Ks1NI5Z78leq32+5SalY9Go8G6Q1e2RUbUaq5ri4eHB0pladkvX77Mzp07iYuLY/HixYwZM6Yec1dzcnNz+fHHH+nVqxdr1qzh4YfLdqgSQojGIS8vj40bN3L06FFsbGzo27cv5ubmXLhwgf3793P8+HFmz55Nx44day0Pubm55OXlYWVldcdRsLp3746tra3ReQ8+qH3hyJEjR1i/fj0KhYJBgwZhbW1NYmIimzZt4pdffmHy5Ml11ySshuoy2THTeOKN4hqcfj0zK4noLVHs2JfIsdRs8tHWffuovJg2y7eCOnlFec0madsnRH7+LYkn08kBaO2Est9gXgjwx9vFAUV1d9vVdNRbI1i/ozSftHbC3KTsXWSDpQybb92z7EQigmYR/l1OuVk5Z1OIP5tC/ObFKJ97j9X/8MLBoLBVy4d+E7k24zfwy/iK0+b/HE1o4HyizxqZl5VCSlYKKfFRvNu6PyGRq5nuUoWDWpqhqiQus9/LlN0kn4vbX+aZoHj092R+Vk6545WTHMnL08M4dNXIZm5mk/JdHCnfxbH670p831vNklEOlS6DQTPECvJqYmKCyR9qwqbOIjI1v0zaHNJPxpP+Rjzrl3jyYewavNpVvPmqqNFyV1LZfWF8/1TAdggTfBTEfabdRxkx35I6pyd3DGWKv08fFH+fSvrJAFz/9afKZ7yBGT58OOPHG35hJ02axMyZM9m4cSMqlapJVPrz8vK4cOECnTp1MrhTKYQQjYlGoyE+Pp6jR4+iUqmYPHmyLpDQaDQkJiby6aef8tVXXzF79uxaG2547969lWp65uHhccf5N2/eZPfu3VhbWxMcHIy9vfau+IgRI1i+fDn//e9/+e2332jXrvIVlpLmZOXq1AZN0surybqM4WZMMDEp4Ozmufi8bViXBG3d99C2FA5t+wDlK5uJec31rs3dK8zr1XRS4tN5M3497zpPZ83GIVXaBxRmk7j6NWYtP1Qun1xNr7vWTPk/RxEw1K9cEKOwd8LOyiAlKf+ay/CZ0WSU6a/cppcSZcmfs53BTrXprDevVxtaVjLiy/8xjGdGlA9iFPZKnFqXSXz1EGHP+hD2Y9kKeSWYtdHLnxKlvUHucTKY1/KOq8qOD2VaUPkTr6yMbbMY9GzZk6pkW04YVJ1uphA9ezgBMRlVKtbd5JyIImBogJEgpoyr8cydGkbSX/e+zYZQ7qpT4DrYt/TjxT0kGQmsS5R8nz4o832yeNiRhyo5UEBj4uTkxIgRIzh9+jQZGdpjtXTpUgYOHEh6erpB2vT0dAYOHMjSpUsB7V26u
XPnMm7cOM6ePcuiRYtwcXGha9euTJ48mbS0tErlQaPRcODAAby8vHB0dMTNzY21a9eSl5dXLm1mZiYhISG67fj5+XHo0CFdsLl161b69OnD8ePHiYuLQ6lUlivLhQsXdOtwdHTEy8uLHTt2GAzkUFLWxYsXs2PHDp588knGjx/PtWvXACgsLGTHjh0GeV6+fDlXrxr7VRRCiKrLzs7mwIED2Nvb4+vraxComJiY0K9fP5ydnUlLS+Onn35i8eLFBAUFcfnyZdLS0ggMDGTx4sXk5uZy7NgxAgICiImJYcuWLcyYMYMlS5aQn1+Nelc1FRQUcPPmTTp27IidXWnbmObNm9OrVy+uX7/O77//Xuv5qNW6zOUM9ix5huFGghhD+aSs8mPWXdab/2MYPuXyaiRdaiQBz4fqWvbc1V/pRM0chF/ZIMbKDqfienTdNC3LimPWs6Gob5ZkQIlv6BJCxipLm2pdTSH6/fmE/iuFfCA/YT7TVjqxQxcFujI9dgfTdeuMJqD/fN3O8F28w0gzp7vITyL89Uh0VYfW/QlasZjpKr1HX/kZJMVEMP/t6OJ06UROfYM+Bz7Es2ygcycu09kRq8s92TEBuAXrcs+S2JDyTcIqkJJQvJyVEq8Jvgx1d9I+Hryq0DVHy08ONwh2FM6+LFwchLezXWnZCvPJTo3mnSmhxF8FyEe9MJx4jyqW7Q6iF4Xqth/ybiC+JY8VC/PJ/jWeT159ozTIORtJ2DZ/Yvyq/3SkoZS7OhROSvoDhwBIIT0jHzobuQdS7vvkzPi3w/jbM91prrlNQUEBBZdO8med5bz2mZiY4ODgwO3bt6s9Itu5c+cICAjAycmJyZMnk52dzVdffcW8efNYt27dXZ/ybNq0idjYWEaMGMHgwYM5cuQIS5cu5cqVK7zxxhu6ZgZpaWnMnDmTli1bMmfOHAB27NjBlClTWLJkCePGjeOJJ55g9erVvP/++9jb2zN9+nQsLS11d/6SkpJ49dVXKSoqYsqUKdjb27Nr1y5effVVzp49yyuvvGLQrGH79u1ER0djb2/PI488AmiDmFWrVvHxxx8zfvx4pkyZwvHjx1m3bh3Hjh1jxYoVumYUQghRXZmZmVy7do1BgwbRsmX5G7Hm5uYolUouXrxIbm4uXl5eZGVlERsbi5WVFaNGjcLW1tYgAFKr1eTl5WFjY4O1tXVdFkfXn+X69esUFBTo+u9oNBquXLmChYUFVlZWd1nLvan1uszJCEJPAtjQ/7UPefc5FU52xfWNvzJI+jyMgCUl289HvTCSxGcWojL2WOZqPG9M1atLo0D53EKWzPEuvWmfn036d9G8GxzOobPppBtZTXnZxL36DKEJJUFs8Xr/5ouypHyFOXURyOQQv+QNvUqXO0t2bsC3bEfm1kp8w2JwsnkGn3XFIcOqj4iduKFqfV2qkrOETUReLPnkysLoz/HvXCaRwgFXvyXscLRhuF8kGQA34/hkVxCe91Dhvlc2o5awY4VvmeZ3JXJQfxZReqJ0ns7m6BBcy96pN1Ng18ufNbEKAgYVB4W1UDabUR+yY0WZpoJmCuw6exESbQcj/HTHISlaSctp2QAAIABJREFUTbqfP07V2lLDKneV2bWhMyWBDGT8kQN6PaW0yn+fFsdF4tNeQ2FhIbpXx7TqBn82nVCmoKCA5ORk2rZtW+3K982bN3nzzTfx8fHB1NQUjUZD165dWbhwIampqXcNZOzt7Vm7di1dunTR5Wnx4sVs2bKFESNG4OrqyvXr1wkPD0epVBIWFqb7AZ4wYQJvvvkmn332GU8++SQODg5YW1vTsmVL7OzseOKJJ3Q/lteuXWPFihVYW1uzevVqOnXqBICPjw+rVq1i9erVuLi4MHjwYF3erK2tiYiIoHv37rpp+/fv57PPPmPNmjUMHjwYExMTxo0bx1NPPUVgYCC7d+/mhRdeqNa+FEKIEhkZGWg0Gtq2rfglaMOGDWPYsGG6z48++ih79uzR9acp29nf2tqaBQsW6G7u1CUbGxsGDBhAXFyc7om2hYUFP/74I0ePHqVr1644OjrWYg7qqC5jpSRwYwxBfctEJ80dcJ2xhh02AQwOKb5xfjOKPYkhqNzLRjL5JH0cVty/F0CB+9Jv2OBTZvsKO5w8Avn8oIowbx8i79DipETOrn/yhl4Q4x72DRueK7NeM5s6CGRSowiPK30k6B76XvkgRkeB62tLmL7Lp7hiqyYqPh3fydWr1t5N+k/xpR96eaMqG8To50z1IoHukcwvPqZJ6mSy/RzKVTPrROdANoRXFMQApJNxTomyl/ZTO3//8l8Afe1GMMFrPuo47ceko6nk+DlQI632O09nw9Ky/Z30NFfhO01J5MIU7eeT6VzMB6dqdcBvQOWuDoWNQXPJs9mXKBfIlP0+vbOE8R1Ar3tMk3Pt2jWioqLYtm0bL774oq5iX1U9evTAw8NDNzqKiYmJruL/xx9/3HV5T09PXRADYGFhgbe3NzExMfzwww+4urpy9uxZvv/+e1auXGlwF7F58+YMGDCAffv28dtvv90xaPrpp59ITEzknXfeMSirmZkZ48aNY8eOHcTHx9O/f3/dvKeeeopu3brpPt+6dYu9e/fSr18/Hn/8cYP2x7169aJLly6kpqZy69YtmjVrdteyCyHE3bRuXXNNGtzc3O4axBw7doyIiPKD2pSdVrbPjLFl9NOZmJgwcuRIbt26xe7du9mzZ48uzRNPPMGkSZNqrZ+PVl3UZRS4h64uH8TocRg7Cf8QNVHFn5POZ0DZ28w5ajatK212pvBbw5qyQYy+5q6EbFxC+qD5d25eVphC1PI4dLUd94X8P3v3HhdVtf+P/zV3mIEZUEctRVFGvIxXNP1gmqMeFS0EO0aW4i26GGb482TKqdCTX7KM5GRaR01No4w0uXTROuqU58RBDSlCSgFFtFRUmAEG5v77A2czA3PlJqPv5+Ph4+Ge2bP32rPZe9Z7r7Xe682mQcxt7R7IFB5Nb4wqRbF4OtJF1V8QhqgngrBzU8MXU/htHsoXydAez8kFPt0B3D4BpeVQGwHHs1JKEbEmDUFLby+Ket+xCu/4p2IQ5rSi36QbnktiSK3vF2oV2qo36vinXFyAAGSh4QBuBzIox/VKAC16CNN5jrstlKual6bp9RQXKQVwd0UxL7/8Ml5++WWb17hcLhYuXIj4+PgWZ4rhcDhtnuYyKCgI/fv3x+XLl6HX63H+/HnU1tbiqaeecvgZV329L126BAA2rSsWUqkUw4YNY/ZjweFwbIKVmpoa/Pbbb8jPz8ewYcPs7qdPnz4wGAwUyBBCOh137vN8Pt8m+1h9fT00Gg38/PxsAo2mQYejrGXWrf2//fYbfvjhBwQEBGDMmDEQCoX43//+h7y8PAQHB2PatGntOBVAR9RlYvH0HBc1a0EI5OEAchoWC/9sPppGm/cjspilICyPUbieA6+3DDLAeSBzVmk1dl2A2KVRDhsO2jmQKUbev60GCEVPdlEBbyAbrAAsMWBOIUq0aJcUubJhCgiwr+GE1+7Emg3jkfaKwmEaYHFo
OMJD274cnuJzfe50EdzmVlmpHuUmB9fT3RXH2KRfrqurQ1ZWFsaNG4dVq1Z1yFwDnrDM1qzRaGC43a8vMDAQq1evdpjRxrpVx54LFy6gR48eCAgIcLiO0Wi0yVLnyKxZszBv3jy7P7hisbidnyoSQu4lGk0bZOvxwLBhw/D2228zy5Z5YhYtWtSqrGU3btzAxx9/DKlUipUrV0IobHgaO3PmTOzZsweHDh1CUFAQBg8e3HYHcwe4ToPsA1dRSXGRVTgimoHwIa0t1e3t5h1GY20nBpMfcFyQ9g1k1CX4qaBxUTFC7tZs5QI/67Va84TexX7C47B+ajrW3O6DV/zRUow7IIUiejmio8IRPkQG6d2QAcqoRnHOYSiPHEfmLyUot+T2vtt5+XE3mxfRwfV0l8UxNumXzWYzAgMD8c4772D69OmYNWvWHS6dLZ1OB51OBy6Xy7T2VFZWIjg4GOPGjWvRNvv164dr164xmcesmc1mmEwmt7fF5XIxevToThcAEkLuHkFBQWCxWPjjjz8crvPdd9/hyJEjmDdvHsaM8TQzU8cqKyvD9evXMXHiRCaIARrupxMmTMDp06dx8uTJjgtkOm1dRo3Lv1s9XH1A5mTIg2fbLTlT2LiokEPuJHho30BGo7L5spWrx6H/ak83UoKKW2iXQAacIMT88wDUz81HsiWNbW0FlGlJUKY1LAp6hmHGYzF4fHYEwj2ZFKgzMKpReCgZa9ano7DW9ep3DW89bnUFrlsthgc1afa1cz2FWF1Plif0ZrMZZrMZ//73v9utqB2FxWJhzpw5OHr0KPbu3YsHHnjAJh3mnfbnn3+irKwMs2bNgkAgQLdu3QAAp0+fxtixY1vU9aBPnz4AGro2NP3Bv3HjBgoLCzFu3Dj4+fnh5s2bdrfB5/MhlUpRUFCAiooKZpuEENLWevXqhYCAAPz888+IiIholrnMaDSiqKgItbW1XjVnlqDZ00SAx+OBw+FAp9O1fwE6fV1GC5V1pSRU1kbjxptsV7kG4/qvcbh2x6RfbpVyqNqztVIoR9zuXMzI2YeUze8h64xtnKu9moesLXnI2rIGgiGzservf0dceOepSDlkLMeRxKew7HPrJHcCSIcoEDFV1mx8T/l/tiLrTEcWsJ1483FXXId1Ig+xsB36U3qhHj16YMmSJUhISMDXX3+NhQsXgsViYcCAAbhy5QqKioogkzUMQDSbzcjPz2+XHP9HjhzBnDlzmK5hdXV1SE9PB9AwABQAhg4dirFjx2L//v2YPHkyhgxpbGf/7bffcPz4cSxZsgQ+Po67XA4cOBDh4eH47LPPoFAomC5qRqMRX3zxBcrLy/Haa685HdsiEonwl7/8Bd999x3279+PhIQEphuZTqfDRx99hHHjxmH48OGt+1IIIfc8qVSKcePG4fDhw0hPT7eZEBNoeLDz66+/YtCgQQgODrb5rCctzB0lMDAQvr6+yM3Nxfjx45lWGZPJhFOnTqG+vt7m3t4uvLku08E6NJARh8gR1IKuWt3bpKnKCY4AQRPikDohDm+qy1H8vRKZyiPIyfkJhVcbh0xpz2Yhef4RHH7pANKWuddN7k4p/nil1QUgQNgLu7D5+XCHY43ytHfHReDNx63+/Sc0NqbKMXqg8ydXluupaUuMyWTqlD8OrTFx4kRMmTIFO3fuxIMPPgiZTIbRo0cjNDQUGzZswJkzZyAWi1FQUIATJ04w41Xa0tWrV7Fo0SJERERAJBLh5MmTOHXqFFasWIERI0YAaPhBX7FiBVauXIklS5bg0UcfxZAhQ3Dq1Cl8/vnnmDlzpsuyBQQE4MUXX8SLL76I2NhYxMTEoFevXjh48CBOnDiBFStWYMKECS7L+/DDD+Onn37CBx98gMLCQkRGRqK6uhrZ2dm4cOECBg4cCLPZ3I4DVgkh9wIWi4VHHnkEV69eRU5ODs6dO4cBAwaAzWbj8uXLuHz5Mrp164bY2FgmwBGJROjZsyeKioqwbds2hIaGYsaMGa0qx7Rp0zBx4sRWz/HSt29fPPDAAzhx4gSSkpIwZswYBAYG4tSpUygtLcWAAQMwatSoVu3DFW+uy7S5QBnkve/UGJkmWjRpZQcTiIMgj4yFPDIWAKCtKEbOofeQsiXrdtOeFnmb5uLlvrlIndVJm0i1Odj3dh6zKHsmDWnMxKJ3Ma8+bi3ychpzf0AUDnmw47WBhutp7WgzE8A0zCNjYMZtqO6ieWT8/f2xcOFCPPvss/j000+RmJiIPn364O2338amTZvw8ccfQyQSYebMmTh48CCSkpLavAyvvfYaevTogXfeeQe//vorpFIpVq9ejcWLF9tk2Bk/fjw+//xzfPDBB9i/fz9UKhUGDRqE119/nZmPwJWwsDCkpaXhgw8+wPbt26FSqTB06FD861//wpQpU9zK6CMUCvGPf/wDDz74IHbs2IHVq1dDIpFg0qRJSE1Npe5mhJA2IxQKsXz5cuTm5iIrKwu5ubkwm82QSCSYOnUqoqOjbcbq8fl8/PWvf8X27dtRWFgIs9mM6dOnt6oMvr6+bTIekMPhYOHChRg4cCC+/PJLfPfddzCZTAgMDMRjjz2GKVOmtG+iFK+py4jR3XrqoKsVUANtn9H3sY3IXuN4yvj2DWR62qZYO1tcDoy5gxMOtoBAKoPimVQo5kxBwpSE25P+aJF14DD+Pivmzswj48rZH5HO9Kccj7gFnfECaAfefNyVR/BZWuNi0DNRCGtaV7V3PY22nxnLG40bNw6lpaUO3x8/fjwKCgpsXhs6dCg++uijZut+8cUXzP99fX2Rmpraon0CgEwmw4kTJ5hlhULhdH2gYZxLcnIykpOTHa4TGBhoU86WbKNp2Zri8/mIjIxEZGSkyzITQkhrsNlshIeHIzw83K31g4ODm93fRo8ejV27dtldf/bs2Zg9e3aLy+fJ5z09ljblNXUZAaT3B4GZxiSvGOWYAXmrtyuFLBSNlZ2CYpQjzOE0LG07qUIzMoy2+pv58dscVLTvDt13ZicioyJv/0tA1lUX60tnY/nfrE6PstgqNVwnY9Ra5RDne8VIqDbhtcetRc7WFDROzxqGuFn2bgWd+HoihBBCSOt5UV2mYbqU2y6nQ1ngeF2PtjvcqrKTcxg5Turo7RzIiBE+M6YxklRuxY4cN6ccvFyIwvbMLycWAAWFKCwoRGFBFr455XpnAoHE5Tqdz+2sb85olMhMc7GO1/Ge4y4/sAxLdzWGxUFLVyEmxN6arbieCCGEEOJlOnddRhAegVhmSFI53ktXup6Ms6QQeS5WEU+YiRhmu0ps3ZnjcLvtHMgA4r/EYjlTKSvHzmeWIb3MxYfK0rEsKhKRk5dh31n3ohltjYcVuhAFYqzGah159z3kOcuOZixHzrc/Ni4rZA6buTynhbot66O9ZWiMkcvx3uZ0lBsdrKvOQUrMMuzrlKn9PORtx22sQM7muZi+2urCD4nD5v8v3GEzctPr6cNnn8fnrq4nQgghhHgHb6rLCMIR87yMWdSmLcOyA076K6lzkPxckstABuIZiLXabvmupVi23/522z2
QAUeOuLfiwRSnVok1k6dj2fYclDcNHDTlyNmyACMnr8GRSgCVR7Avq9h+FCYNatwmgH1bdiKv8vaCVguto5POCELM3+Iag5GSnZgbtQb7Tpc3+6y2LAdbl0RiDTOBqQCz50a0anyMtJdN6fHe9jyob+9Xq2llVNMzAtFW8wZqj65B5JKtUJZUNBybUQt1WSGyticg8sH52Hq2yf6Kbecz8RrecNyWMqQlYcHYcZi/Ja/x71ukwMbdiQhzltnPzvW0duoMxO/4Hy537KTKhBBCCGlr3lCXsSJftB5xzHBdLZSrpyMyMd0m6y80FSg+uhULJs/HzhJ7W7Gz3aUbEc88uNVCmTgJ05ftRE6Z7fE673nnYhIa++JwoDQR1vkFBKNWIW2XGvOX7kNDMrliHNk4H0c2AhBJIe/fHddLC1HRJKIUTN2ID19yMMiJE4bJiwTY+dHtAzqTgrmjU26/qcDGH3chxsUkmoLwVdj2Uh7mbrpdmSxJR1JMOpIACHrKIetyHcVnK5oFUrLHUrFqRivzMoRNRqxoJxNF522ei5GbLcXfiNxdrUkkIMbsNRuR8f0aKG9vX/2fFCydluJgfRnCxmiRd/p2tHtZ1RBUtXfa6zbXmY5biTXj+8Pdq0cwJA7v706Ewo2T3vx6KsG3b8XiyJtmQNgNA/t2xfULRfgsy/snxCSEEELuLZ2pLuMGYThWvROPnCVbmey+hfvXIHK/gxpQiAIKodL1eBpBGFZ9sgvqJ5di3+3gp/hIMuYfSUbDnDoydL9V3AEtMrdJFeuRfXgjYoY0CUtqK1BY0DSIEWP8yjT88EEMghyeCAHCV6QiJrA1pRJAvuwAftgRh7Am29FeLURhsyBGjPFrDuBA8gwn5XJ31+FY9XZM26eps+gdg11fpjoYa2ElcAbWH87G+09Yr5iD4ovtVbB25m3HHRiG2LeykZvpXhBj4fB60tzAb2d/w01qnSGEEEK8k5fVZQRjVuHAwfWY4aJOLhgSh12frEdEVzc3LFVgfea32Div6dyNWlScLUThVW3HBTIAIAiNwcYvi5CbmYr182dAHmJVjRdJIZ8wG3Gv78K3P+Xj4xfCIXUVLATOwMbj2Uh9ZgZkli9PJIV8WBC6ezDxpnRqIg6czMe3O9YjdoYc8p5WX1fTcj0TBnEbRbniGRvxw5epiJvROEuroKcc8r7d2ybVXt/Z2Hg4H9/uSMTsCXJILQOnbh9T4o5vkX/yfcSGCiAdONpqzE8hMv/XaXOyudaZj1skhXzYeMxeuh6pmd+j6OQBrJ8rb9HflCA0Bm9kn0XOoc1IenI6hvS3up6E7t4lCCGEENLpdOa6jB2C0Fi8fzIXB96Kw4xhjfVaBMognxGHjenfIz8zEQppd9v5Z1wRyhCTnI2iH7OR+nqs7bYhAMtsmRacEOJ1LJNhOpoQc8CAAXe6iIQQQsg96/z585BIJODz+eDz+eByueBwOGCxWMw/0nId2iJDCCGEEEIIIW2BAhlCCCGEEEKI16FAhhBCCCGEEOJ1KJAhhBBCCCGEeB0KZAghhBBCCCFehwIZQgghhBBCiNehQIYQQgghhBDidSiQIYQQQgghhHgdCmQIIYQQQgghXocCGUIIIYQQQojXoUCGEEIIIYQQ4nUokCGEEEIIIYR4HQpkCCGEEEIIIV6HAhlCCCGEEEKI16FAhhBCCCGEEOJ1KJAhhBBCCCGEeB0KZAghhBBCCCFehwIZQgghhBBCiNfh3ukCEEKINzIYDLh16xZMJhMCAwMhEAjadX9GoxEqlQo6nQ5msxkcDgddunQBl3tv3Ma1Wi0qKyvBZrM79Lhra2uhVqvB4/HQrVs3AEBlZSXq6+shEokgFos7pByEdHZ36hol9zb6KyOEdApqtRq1tbV23+NwOBAKhfDz8+vgUjXoDBVXlUoFrVYLFosFHo8HLpdLFQVCiEcMBgNUKhX0ej3MZjMAgM1mw8fHB/7+/mCzGzvqdPR9zxIIWcplD4vFatGDoxs3bkCv10MsFkMkErW2qKQToV9BQkinwmazweFwmGWz2QyDwYDq6mro9XoEBgbewdLdGQaDAQaDASwWCwEBAfDx8bnTRSKEeJm6ujqoVCqYzWaw2WzmQYjBYIBGo4FOp0NAQAB4PF677N9sNkOv1wMA+Hy+w/VYLJbDhzQsFssm2CKEAhlCSKfC5/ObBSu1tbWorq6GVqtFfX39PVeRNxqNMJlMNpUPQghxl16vR3V1NcxmM4RCISQSCfOe0WhEZWUl9Ho9amtrERAQ0C5l0Ol0bnU9Y7PZCAgIoHsdcQv9lRBCOj2RSIS6ujro9Xro9Xr4+Pg47CrQdEyD9VgWf39/aDQaGAwGAA0/mH5+fg67GjTt7lZbW4va2lqIRCIIhULmdYPBALVa7XK7dXV1qK6uhtFoZNbz9fV12m3D0r0DaKhwVFRUAIDNcWs0GtTU1DDbZbFYEAgEkEgkzZ5e1tTUQKPROCyDs7E/lu/Dx8eHCTaNRiPUajW0Wi3TJYTD4cDf3x++vr4tOn6tVtvs+/QkeNVqtUwLnuX74PF4kEgkNpUjk8nEdNmzlJ3H48Hf37/dxzwR0pEs17zlOrDG4XAgEomYa6Gqqgp1dXXM+9b3PbFYzNx7RSIR6uvrYTQa4ePjY3NPbG/W93kfHx/U1tbCZDIBALhcLtOyZH3/BBruYWq12ub+6c59ydEx34s9BDobCmQIIV7BUiF31n/aGbPZDLVaDTabDR6PZ9NljcPh2K0oWyqzWq0WBoMBfD4fPB7PppLraLs1NTU227W0KgGN3SosT0CNRqPDH0QfHx+wWCzU19eDxWIxy5ZtVFdXo6amBkBDJZzNZkOv1zM/tl26dGG+O0sgYvk8i8WCTqdzWQZHTCYT8ySXw+GAx+Mx3eBUKhVMJhNTWXD3+HU6HaqqqpgWKA6HA7PZDI1G41aZ6uvrmX1bxhHp9XrodDrcunULgYGB4PF4MJlMuHXrFvR6PbOewWCAXq9HVVUVAgICKJghdw1LUO/ob5rH44HD4cBkMjGBjbP7HgDmIQ+LxQKLxWrfA3DA8nCLy+WCw+HAaDTCYDCgqqoKXbt2hY+PDzgcjk3wweFwmHuQp/flznDMxBYFMoSQTs/y4wTAZvyMpwQCAQIDA5kfoFu3bkGr1UKr1ToMZAQCASorK2EwGMDj8WxaLlxtV6fTwcfHBzqdDjU1NWCz2UxFGmj4waysrHTaZc7X1xdsNpsJZEQiEdOqoNPpoNFowGKxIBaLmSei1pX06upqSCQS1NfXo66uDmw2GxKJhNlX0zJ40p2jvr6eqUR07dqVCZhUKhU0Gg0zUNiT46+pqYHJZIJAIECXLl2YfTlLBmFhMpmYzzcdoGw5JxqNBhKJhGmxadrNxlL22tpaCmTIXcFgMMBkMjkde8LlciGVSm1es3ffs8bj8RAYGMjck7VabdsX3gUWiwU/Pz8mEYyl+5rRaIROp4Ovry98fX2h0+lgNBrB5/OZhystuS83PWZy51EgQwjp1HQ6HdRqNdMtoml3JXdZggDrp2hcLhdarZbpktDW27
27c3OiaoIMWpCyGEEC8f6VomhCixqlSpgq+vL19//TUeHh40bty4wH0tLCzw9fVl69at9O3bl44dO5KRkcGvv/6q3FxfvXqVJk2aFKsM+hvwBQsWcPr0aWrXrs3Ro0c5d+4c5ubmXL16lfT0dCwsLHjzzTfp2rUrkyZN4sCBAzg5OXHlyhW2b9+eb8a0Tp06ceLECYKDgzl79iz+/v7cv3+fLVu2EBsbi6urKzqdzuj4mW7duhEeHs7QoUPp1asXzZs3JyoqijVr1lClShXat28PQM2aNRk5ciQTJ06kd+/evPfee9jb27Nr1y62bdtGYGAgWq0WHx8fpdyHDx+mQ4cOxMfHExoayr1793j77bcxNTXl1VdfZciQIUyYMIFr167Ro0cPrKysCAsLIzw8nBEjRvDKK68Yrcfy5csTFBREUFAQffv2VcqyceNGwsPDGT58OF5eXsV6bYpTF0IIIV4+EsgIIUoslUrFm2++yapVq/D19aVKlSqF7t+xY0fKli3LV199xbJly3B0dGTMmDHUqlWLQYMGceXKlWIPJi9fvjxz585lwYIFbNy4kUOHDtG6dWvWr19PaGgo4eHhJCQkUKFCBSwtLZk4cSLVqlVj1apVbNu2jWbNmrFo0SLmz59vkK+lpSWfffYZLVq0YPHixXz66afY2NgoXbr0XbGM0S/MGRoayqZNm1i+fDm2tra0bduWYcOGYW9vr9SffnD+woULmT9/Plqtlrp16xISEkLLli0xMTHBzMyMzz77jNq1a7NhwwY2b96MjY0NXl5efPjhh8qAef1aN3Z2dixZsoQpU6Yo+c2bNw8/Pz+j45f06tevz+rVqwkODiYkJISkpCTq1KnD999/T5s2bQpN+6R1IYQQ4uWjevDgQfG+1YUQJYZOp1Me+vU0MjMzycrKIj09nVq1ar3oIgpyF/kcOHAgtWrV4osvvjBYg0UIIcTLKzo6mjJlymBqaoqZmRkajUaZWl7/EI9PxsgIIcRTcuvWLebNm0dKSoqyTafTcejQIc6fP0+9evUkiBFCCCGeEulaJoQQT0lSUhK//PILmzZt4r333uO1115jz549/Prrr7Ro0YKOHTu+6CIKIYQQLw0JZIQQ4impVasW69atIzQ0lOXLl3P79m0cHBwICgqiX79+Bc64JoQQQojikzEyQpRiMkZGCCGEKLlkjMyzJWNkhBBCCCGEEKWOBDJCCCGEEEKIUkcCGSGEEEIIIUSpI4GMEEIIIYQQotSRQEYIIYQQQghR6mjKlCnzossghHhMxmYtMzExwcTEhPT09BddPCGEEOJfz9zcHDMzM5m17BmQFhkhhBBCCCFEqSOBjBBCCCGEEKLUkUBGCCGEEEIIUepIICOEEEIIIYQodSSQEUIIIYQQQpQ6EsgIIYQQQgghSh0JZIQQQgghhBCljgQyQgghhBBCiFJHAhkhhBBCCCFEqSOBjBBCCCGEEKLUkUBGCCGEEEIIUepIICOEEEIIIYQodSSQEUIIIYQQQpQ6EsgIIYQQQgghSh0JZIQQQgghhBCljgQyQgghhBBCiFJHAhkhRKmUkJDA+++/T0BAAPfv33/RxSkRSmKdJCYm0rVrV0aMGMGDBw9edHGEEEK8RCSQEUK8cKtWrcLJyYmwsLAC99FqtUydOhUPDw8iIyNRq9WUKVMGW1tbNBrNcyxtyTBr1iy8vb2JiYlRtv3b60QIIcS/i3zTCSFeuJYtW+Lo6MiePXvo1KkTlpaW+fa5fv06Bw8exNPTE2dnZ6ytrVmyZMkLKG3JVb58eakTIYQQ/xrSIiOEeOGqVatGkyZNOHnyJFevXjW6T1RUFDExMfj6+mJtbf2cSyiEEEKIkkYCGSHEC2dqakr79u1JSEjg+PHj+Z7PzMxk37592Nvb07RpU6DwsRfnzp1j2LBhuLu74+7uzrBhwzh37hwAycnJ9OvXj+HDh5OWlqakSUtL48MPP2To0KGkpqYabB8+fDj9+vUjOTnZaPn13bwOHjxIUFAQLi4u/Pzzzzx48IARI0bQtWtXEhMTDdJEREQYdKeLiYnB29ubWbNmERUVRf/+/XFxccHT05MZM2aQkpICQFhYGE5OTgQHBxMfH4+vr6/SxcxYneQ9zpYtW+jcuTNOTk68+eab/PTTT2i1Wq5evUpQUJBSX0FBQcTHx+c7z8Lqtbh0Oh1RUVFKfi4uLvTp04dDhw6h0+kM9tVqtWzZsoWOHTsalP3HH3/M170uLS2N4OBgfHx8cHJyokmTJnzzzTf56l8IIUTpJ4GMEKJEcHNzw93dnfDwcINAAuCff/7h+PHjNG/enOrVqxeaz6FDh+jTpw+pqalMmTKF4cOHc/78efr06UNERATlypXDw8ODCxcukJCQoKS7fv06UVFRnDt3jhs3bijbExISuHDhAh4eHpQrV67A4967d49Ro0Zx5MgRXn/9dczNzR+rHrZs2cLQoUOpXr06w4YNo27duixevJh58+ah1Wpp2rQpoaGh+Pv7U6lSJb788ktmz55N1apVC833s88+44cffsDHx4fAwEBycnIYP348X3zxBYMGDUKn0zF06FDat2/Pjh07GDdunEH9PKpei2vr1q307duX2NhYxo4dy3/+8x8ePHjAgAED2LhxoxLM6HQ61qxZw+jRowEIDAykffv2fPvtt8yYMcMgz7S0NCZPnsy8efPw8fHhq6++on379oSEhPDJJ58YnI8QQojST8bICCFKhCpVquDr60twcDDR0dHUq1dPeS4iIoLY2FgmTZqEmZlZgXncuHGDGTNm0KtXL0aMGKHs27VrVz7++GOWL1+Oh4cHDRs2ZNGiRVy7dg0HBwcAzp8/z927d8nKyuLPP//E2dkZyA1wrly5QsOGDVGpVAUeOzU1lYCAAD7++GPluI8zS5eZmRkLFizAzc0NgPv37zN27FgOHz7MnTt3cHBwwMHBgYMHD1KmTBllzBBQaKtDmzZtmDFjhjL+qGfPngQEBLB8+XJmzpxJ9+7dUalU6HQ6Vq5cydSpUzl9+jStW7cucr0aG9tkzNWrV/n222/x9PRk9uzZVKpUCYAePXowefJkvv76a+rUqYObmxvXrl3jhx9+oEOHDkyfPp2yZcsCMGDAAIKCgvj777+VfM+dO8cvv/zC+PHjCQgIQKVS0aVLF+rUqcP48eM5evQoHTt2LOYrIoQQoqSSFhkhRImgUqlo0aIFkPvrv15aWhp79uzBw8OD2rVrF5rHyZMn+eeff+jUqZNBwGNnZ0ejRo24dOkSCQkJODs74+zszMmTJwHIysri8OHDdO7cmXfeeYc///yTrKwsIPfm+LXXXlOChYJUq1YNf3//QgOtomjbti2urq7K3+XKlcPJyYmkpKQnmlLZy8vLINCwtbVV6tTHx0cJ0lQqlRJE3blzByh6vRZVREQEly9fpn///koQA2BpaUnv3r1JSUlhz549AJw4cYLY2Fi6dOmiBDGQG/h6enoa5KvVasnOzqZcuXIG59OlSxc3KjSVAAAgAElEQVROnDhBq1atilxGIYQQJZ+0yAghSgwXFxe8vLw
4dOgQvXv3pnz58ly9epWTJ0/StWtXqlSpUmj6M2fOkJCQgL+/v9Hn7e3tycjIwN7entq1a3Pq1ClSU1NJSUnh9OnTjB49mpSUFL7//nvu3r2LtbU1p06donbt2lSuXLnQY5uYmBTaYlNUarX6qeRTVGZmZo8Mvopar0UVGxuLvb290W6Cjo6O1KpVi8uXL5ORkUF0dDT29vZKy1lhnJ2dad68OZMnT+bEiRN06dIFd3d3rK2tqVChQpHLJ4QQonSQQEYIUWJYWlrSokULvvzySy5fvkz9+vU5fvw4KSkpvPnmm0W6wa9RowZjx47FxsYm33NqtZqqVatiYWHBG2+8wZo1a7h79y6XL18mOzsbZ2dnMjIySEpK4ty5c9SuXZuYmBjeffddLCwsnsUplxpFqdeiePDgAdevX6dChQqUL1++wP2ys7PJyckpVhkrVapESEgIa9euZenSpaxfvx6NRkObNm0YPnz4I1v0hBBClC4SyAghSpSWLVvyww8/sHv3bmrVqkV4eDjNmzc36G5VmOzsbFxcXB7ZFczT05M5c+YQExPD8ePHcXNzU1pdGjRowPHjxylbtizx8fH5ujD9GxW1Xh/FwsKCV155hVOnTnHv3r18LSU5OTlotdrHzt/S0pKBAwcSEBBAYmIiBw8eZM6cOfTp04dly5bJaymEEC8RGSMjhChR9GvKREREEBERwaFDh/D29sbKyuqRaW1tbYmPj+f8+fOP3Ld69eq4urryxx9/cOrUKVq0aIGFhYXSWnPq1Cn27t1LzZo1HzlT2suuOPVaFI6OjsTHxxtdMyguLo7Lly/j4uKChYUFtWrVIj4+nmvXrhWaZ05ODlFRURw7doz09HRUKhUVK1bE39+fefPmodFo2Lt371MpvxBCiJJBAhkhRImiX1Pm/PnzfPnll5QvX15ZO+ZRvL29sbe3Z+XKlQZTKOt0OsLDwwkLC1N+7bexscHDw4OVK1fy119/GXQ7aty4MXFxcWzcuBEPDw+j3amKwsLCgurVqxMdHU1sbKyyXavVcvjw4cfKM6+cnJx8a648C8Wp16Jo0qQJTk5O/PjjjyQlJSnb09LS+PHHHylbtixt2rQBclvHHB0d2bRpk8G6P7du3eLPP/9U/jYxMWH//v0MHDiQo0ePGhwvOzub9PR0ypQpU+xzF0IIUXJJ1zIhRInj5uaGs7MzZ86coXv37kVuEalZsyYjR45k4sSJ9O7dm/feew97e3t27drFtm3bCAwMRKvVolarUavVNG7cmBUrVuDu7m4wmNzBwQFHR0cOHDhA48aNUavVj30urVu3JjQ0lNGjR9OxY0fUajVHjx7l2LFjj50ngIeHB8HBwUybNo2mTZvStWvXJ54xrSDFqdeiqF69Op988gmTJk0iICAAf39/LC0tWbduHVFRUUyfPl3pSujg4MCHH37IhAkTuHbtGi1atODBgwf8/vvv3LhxA3t7eyXfbt26ER4eztChQ+nVqxfNmzcnKiqKNWvWUKVKFdq3b/9M6kcIIcSLIYGMEKLE0a8pc+bMGTp06FDkG3T9VLsuLi4sXLiQ+fPno9VqqVu3LiEhIbRs2RITk/9riK5Vqxb29vY0bdoUa2trZbu1tTVNmzYlNjaWWrVqPdG51KtXj3nz5vHVV18RHByMra0tPXv2ZPTo0coij4/Dx8eHwYMHs3LlSq5fv16seiqu4tZrUfj5+VGjRg0WLlzIrFmz0Gq1NGvWjNWrVxus2aM/trm5OQsXLmTBggU4OjoyevRozp07x/bt25U8q1SpwrfffktoaCibNm1i+fLl2Nra0rZtW4YNG2YQ9AghhCj9VLrn0S9BCPFM6HQ65aFfQyMzM5PMzEySkpKe+CZciJJKp9Mxc+ZMduzYwbJly3B0dHzRRRJCiHyio6OxsbFRprrXaDTKNPv6h3h8MkZGCCFEiaXValm2bBkXLlww2H7lyhX++OMP3NzcsLOze0GlE0II8SJJ1zIhhBAlVkZGBufOnWP27Nl0796dZs2acfHiRdavX092djZTp07F0tLyRRdTCCHECyBdy4QoxaRrmfg3yMzMZPv27SxevJioqChsbGzw8fFh1KhR//qpsYUQJZt0LXu2JJARohSTQEYIIYQouSSQebZkjIwQQgghhBCi1JFARgghhBBCCFHqSCAjhBBCCCGEKHUkkBFCCCGEEEKUOhLICCGEEEIIIUodCWSEEEIIIYQQpY4EMkIIIYQQQohSRwIZUaLFxMTg7e2Nk5MTYWFhBs89ePCAESNG4OTkxIgRI3jw4MFTP35YWBhOTk44OTkRERHx1PMXQgghhBCPx2ggk5OTw/nz55k4cSLt2rVTbuR8fHwYN24cERERaLXa513WEkGr1RIREUFgYCAbNmx4ZsdJTEyka9euz/QmXQghhBBCiNJK8/CGmzdv8vnnn7N169Z8O1+7do1r164RFhZGo0aNmDFjBk5OTs+loC+STqfj2rVrrF27ljVr1pCUlATAm2+++YJLJoQQQgghxL+TQYvMrVu3GDFihBLENGrUiLlz5xIaGkpoaCiLFi3C19cXjUbDsWPHGDx4MNHR0fkyzdsdaNasWc/nTJ6BtLQ0du7cSc+ePWnVqhXBwcFKECOk25UQQgghhHhxlBYZnU5HWFgYERERaDQaJk+eTK9evVCr1QYJfH19CQ8PJygoiNjYWBYsWMCMGTOwsLB47oV/1r777juCg4OVv11dXWndujVhYWEkJCS8wJIJIYQQQgjx76a0yCQlJbFv3z4AWrZsSefOnfMFMQAqlQpvb28++OADAHbv3s358+efU3GfPxsbGwIDA9m2bRu//fYbXbt2pUyZMi+6WEIIIYQQQvyrKS0yd+7c4Z9//gHAxcUFa2vrAhOpVCqaNGmCRqMhNTWVixcvUr9+/Wdf2uesb9++jBw5ElNT0xddFCGEEEIIIUQeSouMWq1Go8mNa+7du0dWVlahCd3c3Fi5ciWhoaHUr1/fYJYtX19f4uPjAQgODlbGUTw8fa5eYmIiISEh+Pn54eTkhLu7Oz169GDLli1kZmYaTaMfn9G1a1cSExP5+++/mT17Nj4+Pkoew4YN49ixY+Tk5DxW5VSrVu2JgpjY2Fg6d+5MkyZN+P333x87H2P05+/t7U1MTAxpaWls2LCBbt264eLigouLC926dSu0DiG3S+HFixeZOHGiUncuLi507tyZkJAQEhMTDfaPiIhQXs9x48Yp23v16mVQnoelp6ezc+dOBgwYgKenp/Ia9e/fn19//bXQMj5LOTk5hIeH079/f9zd3XFycsLPz8/ouRemJFzD8fHxzJ07l06dOtGoUaMil10IIYQQojRSWmTs7Oxwc3MjLi6O33//nbfeeovmzZsXmLBs2bI0bdpU+bs4N316Op2OPXv2MGHCBG7fvq1sz8jI4NixYxw7dgxvb29mzpxJtWrVjOZx8+ZNVq5cyfLlyw0G4mdkZLBt2za2bdvGkCFDGDFixHPvEnb8+HGioqIA2L59O61atXomY4ni4uKYMmUKhw4dMtgeGRlJZGQkXbt25bPPPs
PS0tLgea1Wy7Jly5g9ezbZ2dnK9uzsbKKiooiKiiI0NJQ5c+Y80Y3x6dOn+fTTT7l48aLB9oyMDMLDwwkPD8fb25vZs2dTpUqVxz5OcaWlpfHVV1+xfPlyg+0XLlxg5syZrFu3Dg8Pj0LzKAnXsFarZd26dUybNo2MjAx0Ol1xqkEIIYQQolRSWmQsLS3x9/dHo9GQlJTEyJEjCQsLIz09vUgZVahQgZ9++onLly+zY8cO7O3tAQgMDOTy5ctcvnyZ7t27G6Q5fPgwY8aM4fbt29SoUYN58+Zx5MgR9u3bxyeffIK5uTnh4eHMnDmTtLQ0o8e9ceMG8+bNo3z58kybNo2ff/6ZjRs3MmbMGGxtbQEICQnhl19+ee43eM7OztjY2AC5LVjPIpC6efMmw4cP58SJEwwdOpTVq1ezYcMGpk2bRo0aNQD45ZdflPFPeW3evFkJYlq3bs3KlSs5duyYQf3Hx8czfvx4Ll26BECTJk2U1/PLL79U8lqzZg2XL18mPDwcZ2dnZfulS5cYOXIkFy9exNbWlqlTp3LgwAFOnDjB1q1bGTRoEBqNhvDwcObPn//cWma0Wi1LlixRgpgaNWoo109oaCjDhw8nJSWFX375pdB8XvQ1rNPp2LRpE5999hkZGRk0aNCAhQsXsmPHjqdQS0IIIYR4GlQq1YsuwkvJYB2Z9u3bM3bsWGbPns3t27cZN24c06dP591338XPzw8PD4+nNl7kxo0bzJgxg6SkJJo0acK8efMMfo0fMWIEDg4OTJgwgS1bttC2bVv8/f2N5tWuXTumT59OpUqVlG2enp60adOGjz76iNjYWJYtW0bTpk2pXr36Uyl/UXh6enLgwAG0Wi3lypV7JhdxdnY2Dg4OfP3119SrV0/ZXr9+fRo2bMigQYOIj4/nwIEDtGvXTnn9UlNT2b59O9nZ2fj6+jJ79mzKlSsHQKVKlRg5ciQNGjRQ6m/btm0MGzas2Oewfft2YmNjsbOzY/HixdSpU0d5rkKFCowfPx4zMzMWLVrE7t27ef/993Fzc3sKNVO46Oho1qxZA2D0+mvRogUdOnRQzt+YknANnz17ljlz5pCdnc0777zDxIkTMTU1JTMzk3v37j1RHQkhhBDiyUkQ8+wYrCOjVqv54IMPWLJkCa6urkDubGbLli2jW7du1K1bl/Hjx3P+/PnHHneit3v3bs6ePYuVlRWffPJJvi5FKpUKPz8/ZdHJ3bt3F7i6ff/+/Q1uAPXc3NwIDAwE4OLFi5w6deqJyvw4rKyssLa2fqYX8YgRIwyCGD1HR0e8vb2B3Bv31NRU5bnMzEylK5STk5MSxOTVoEEDpXthdHR0kVvn8mrbti2hoaF8++23uLi45HterVbTsmVLILd1qaCg4Wnbs2cPN2/exMrKirFjxxrt0ubm5sbgwYMLzONFX8NarZbNmzdz8+ZNXnnlFYYMGZKv+6AQQgghxMtK8/AGlUpFy5YtadGiBcePH2fFihXs3buXjIwMMjIyWL9+PevXr8fX15f//ve/Shey4khLS+Po0aNA7q/OeX+lz8vS0pLGjRuzfft2YmJiSE5ONjrGxNg00XpNmzbF2dmZmJgYIiMj8fPzK3T/0qigcSWmpqaUL18eyL3pzdstSa1WU7ZsWQCOHTvGjRs38o3hsLKy4vvvv3+isuknHiiqlJSUJzpeUaSlpXHhwgUg9/rL2xXuYQVdKyXhGr537x7Hjh0DwNvbmxo1aqBSqVCpVJiY5P5GYWzBWiGEEEI8PyYmJsr3s7TOPF35Ahk9tVpNkyZNaNKkCWlpaURGRrJ582Z+/fVXMjIy2LFjB9HR0QQHB1OrVq1iHfT+/ftcvnwZAHd3d6OtAXr6m+3ExESSk5Oxs7Mr1rEqVaqEm5sbMTExJCYmkpmZ+VIu3llc1tbWvPPOOxw5coQTJ07Qpk0b3nrrLaULYcWKFZWb4achLS2NnTt3sm3bNiIjIw0Gxj9vGRkZ/P3338Cjr7+ClIRr+J9//uH69esASrdPrVarfFBaW1uTlZVFdnY2ixYt4scff8TFxYUvvviC8uXLk5yczMSJE7lw4QKtW7dm5MiRRsdxXb16lQkTJnDr1i1Gjx5N+/btleeWLl3KunXrqFWrlpKvMXnz6NGjB4MGDQIgISGBSZMmcenSJbp168bgwYML/JDfvn07X3/9Nba2tkyfPl0ZA6bfDvDVV19Rt25do+n1ZXVzc2PatGnKFPNr1qzhhx9+oEqVKkyfPr3Y3U/T09P55ptv2Lt3b756/Pnnn1m0aFG+Mj8sOTmZSZMmceHChXz1sHPnTubMmYNWq8XMzAwfHx9atmyJi4sLNjY2T/w+XbBgAQsWLABXX2g5vEhpurxeji98bY0+dz8jh7ZLrnI/s4it9qG9IT2ZjRs34u7uXtRiCyFEiZD3Oyvvj4kajQaNRoOpqanRIEYCmqejwEAmL0tLS7y8vPDy8mLMmDEsXLiQNWvWEBsby9SpU5k3b57RbjEFuX//vjI70+LFi1m8ePEj09y4cYO7d+8W+Rh6eX+dfvDgwRN3iXuZdOnShcqVK/O///2PuLg4Nm7cyMaNGwGwtbWlW7du9OrVi1dfffWxj6HT6Th69ChjxoxRpuR+0VJSUpSWH7Va/VgfJiXhGk5JSSEhIQGASZMmMXHiRCWNTqdTWuD0/6tUKpKTk0lLS8PW1haNRqPkq1arMTMzw8zMLN/xTU1Nlf00Go2yT3Z2ttJd0dTUFHNzc6PpH85DfyzI7eKoz2PDhg1s2LDhkfVx+/Zt0tLSlDz008brj1NQGfStXiYmJgb7ZWRkAFC5cmVsbW0LTF+QnJwcJe+H61F/jdy+fbvQbop53bt3D51Oh7m5OQB+fn5UqVKFuXPnEh8fz86dO9m5cyeQG+R26NABf39/qlatWqxy6ymtgSoTUBdtDOTPF9LpXk9L41fz/yi0+ngi97VqKHLLd+77r7DXTgghSoO8gYx+WRO1Wo1arZZWmWekSIFMXnZ2dkyePJmyZcuyaNEiDh06xNGjR+nYseOzKN9T9eDBA7Ra7YsuRomh70b4+++/c/z4cbZs2cK+ffu4ffs2t2/fZtGiRSxbtozRo0fTr1+/x7rJOHnyJIGBgSQlJdGoUSOCgoLw9PQ0aBWLiIigV69eT/PUXlrFuYZVKhU6nS7fL0D6G20zMzPlg9XExKTQQEb/wZs3kNF/UD8q/cN55A1k8m4vjrzl0Gg0Svq82x9WUFmLeg4FycnJKbAeHydQNlaOFi1a0LBhQ06fPs2uXbs4fPgwCQkJ3L17lx9//JENGzYwZMgQunXrVuwJWQwCGZOin/sXfyTze4CNwbb45GzmH0kpVj56EsgIIUoz/We9/rsgbzCj//vh7wMJaJ6cBnLHT5w7d47k5GTMzc2pW7duoV8oarUaPz8/1q9fT0JCAufPny9WIGNubq7cyI4ePZrevXsXKZ2+i05x6HQ65RfsihUrPrVZ114mZcqUUVrcIPcX4fDwcObMmUNcXBzTp0/HxsYm3/TZj5KVlUVYWBhJSUk0aNCA+fPnF7tb1
bNQtmxZ5VrSjx0q7odJSbiGzc3NsbKyIjU1lW+++YYWLVqg0+nQarXk5OQY/K/PQ6VSYWVlpQQT+g9WtVqNubm50gqQV979NBqNso+5uTkVK1ZUAiZ9q4wxBeVRtmxZLC0tUalUfPjhh3Tt2rVIdWJlZaXUQ95AxszMrMAy6PfTBwr6/WxsbFCpVCQmJpKamlrstYx0Op0SsDxcj/q8HR0d+eKLL5TptAujH7/28DVpbm5u8D5NSkoiIiKCRYsW8ffff7NgwQIqVqxI586di1V+g0BGU/TPx7MJOjb9lUGPutbKttHb7xQrj/9/YIBCrx8hhCgN9J/beb9b9QFNQcGMeDIayP1Fcc2aNaxduxZnZ2eWLl2Kg4NDoQnNzc2VfuB5F1MsChsbG6pWrUpMTAx37tzB2tr6mQ3AT0hIUAZ229nZPfdFMUuq1NRUMjMzUalUlCtXzqD+y5cvj7+/P40bN+bjjz/mxIkT7Nq1Cz8/P6ysrIp8jJSUFKXu69ev/1wXuyyMubk5r776KidPnuTKlSukpaUV67ygZFzDlSpVolq1asTExJCQkEDFihWVgOXhR96uZpB78/1wIFPcFhlAmWDg1q1bJCYmFhioFtQiU7lyZapWrcqVK1dISkqiUqVKxa7HJ22RqVmzJiqVijt37nD//v0C0+t0OlJSUpSxKvoZ4vRdy4zVoz7v5ORk1Gp1sd8DaWlpyvu0bNmyBnVja2vLW2+9RePGjfn00085efIkBw8exM/Pr1iz1xm2yBQvCPnv7nu8Xaci1mVMOBj3gEPXsoqdh560yAghSrOCxsrkfUjXsqdPA7lfIJ6enqxdu5aYmBiOHDnyyEAmPj6emzdvAhTpV8a8ypUrh4eHBwcOHGD//v3Ex8cXOsA2KyvrsVtSTp48SUxMDAD16tWTi+f/27JlCxMmTKBSpUosX76c119/Pd8+VapUoX79+pw4cYLbt2+TmZlZ7Bv+okhOTn7qeRbG0tISNzc3fv31Vw4dOsRff/1F/fr1je5bUDeuknANV65cmdq1axMTE8OePXvo0qWLMvg774eoPrjJzs42GE/ycCBT0I2kfnyLsUDm9ddfx8rKisTERA4fPoyHh4fR95j+uA/nUbFiRerWrUtERASHDx/mzp07hX72GKvHvIFMYTfDD7fI6Pfz8PDgtddeIy4ujuPHj9OkSROjwdStW7f4+OOPOXv2LEFBQQwZMgTIvUbyBjJ5y+Dq6oq9vT3Xr18vtH70+YDhDHabN29mypQpVKxYkZCQEKOD4e3t7alfvz6nTp1SxmAVJyAwHCNTvEAiKQtmH0zmy45V+OjXv4udPve4uf9JICOEeFnkDVj03zsPBzFyP/p0KNPdtGrVisaNGwO5M//s3r073yriejdu3CA4OJjs7Gzs7Oxo1qyZwfP6PoGQf9pfyH3x/P39sbOzIzY2lq+//prExMR8x9HpdERERPD222+zY8eOAgfqr1u3zui0vdHR0cydOxfIvVkp6Gb1WUpNTSU5ObnAunxR3NzcsLKyIiEhgd9++43MzMx8+9y6dYvIyEgAqlevnq81K+9NsbEb/rxTPO/atUu5GdfT6XRERkYyc+bMJz6f4vL19cXe3p7U1FRmz57NrVu38u1z4cKFAgfxl4Rr2MLCgi5duqDRaDh06BDBwcFkZGTkG2yoUqnYtm0bffr0ISoqymAAYt4PVP0MK8Ye+v30afUPd3d32rRpA8CKFSs4fvx4vrTp6eksXbqUf/75ByDfbC76eoyLi+Pbb7/l/v37+fJQq9VERkbSo0cP9u3bZ5BH3hv/h8uX96GfbEAfcOi3V69eXTmHtWvXEhkZafT8N27cyNmzZ7Gzs8PHx8do/Txcj87OzsosbytWrGD//v1Gy5iens6sWbOYPHky9+7dU7bXrl0bKysr7t69q1w/D6dNTExU1heqXr06VlZWhb6WBdULKhWYqIv9+C7iPh/+coe4JN1jpddHMoW9dvKQhzzkUdoehQ3ylyDm6VHuRG1tbRk9ejSjRo0iPj6ewYMH4+3tTffu3alUqRIVKlTg1q1bHDlyhDVr1iiz8fTq1Svf9MuVKlXCwcGBuLg4tm7diqurK3Z2dpQrV06ZGtXV1ZXRo0crq56fPn2awYMH4+XlRbly5YiNjWXt2rVs2rSJ7OxsoqKiaNWqldFf7DZt2sSVK1cYPHgwDRo0IDs7m/3797NgwQLi4+PRaDT07dv3uXdt+vPPPxk4cCBJSUl8+umnDB06tMRcvB4eHgwaNIj58+cTHBzMX3/9RUBAAO7u7mi1Wk6cOMHixYs5efIkGo0GX1/ffNNWOzo6KmM0li5dSkZGBmq1Wnm9ra2t8ff358CBA8TGxjJkyBD69u2Lq6srKSkpbNu2jd9++63YXROfhpo1a9KnTx9mzZpFREQEPXr0YPDgwTg7O5OVlcXBgwcNrnNjSsI17OXlxbBhw5g/fz6LFy/m2LFjBAQE4Onpibm5OefPn2f58uXs27cPjUbD5cuX8fT0NAhy8gY9xloi8gYA+v30ypYty4ABAwgPDyc5OZnAwED69euHl5cXpqamxMTEsGrVKi5evGjwAZ43D3d3d0aNGsWkSZPYunUrUVFRfPDBB7Ro0YJy5cpx5coV1q1bx+bNm8nOzubcuXO0bt3aYBayvIFWQV3T8n6R5N1PrVYzePBgzp07x9GjRwkMDOSDDz6gU6dOVK5cmStXrrB48WJ27tyJSqWiZ8+evP766wbp8w72LyzvoKAg3n77bbp27UrNmjXJyMjgwIEDfP/991y9epXXXnvNoIveG2+8wcCBA/nuu+9YvHgx0dHRDBgwADc3N7RaLZGRkSxZsoRTp05hampK+/btiz0O6/+mb1bBY07lvPLk/cdOq1fYayeEEKWRsemWS8p94MvCYNayRo0aERoaypQpU9i/fz/h4eGEh4cbTWhubs5///tfevToke/LJ+8aJfHx8YwZMwaAwMBAJZBRqVR06dIFgC+++IK4uDgmTZqUv4AaDQMHDiQwMLDAbgedO3fmjz/+4KOPPjKa/uOPP6ZLly7P/eKJiYlRboQvXLhAenp6iVnDRq1WM2TIEJKTk1m+fDl79+5l7969+fYzNzdn7NixBmuH6Lm6utKxY0c2bNhgkH7NmjXKjZi/vz+XLl0iJCSEuLg4pk2bZpCHra0tH3zwAcuXL+fGjRtcu3btGZxtfiqVigEDBnDv3j2lbA9ff46OjgQEBCgtIsbyeNHXsFqtJjAwEI1Gw/z58zl58iRBQUH58jA3N2fMmDG8++67yvvV2Owpxt4jD/+K9PA+9erVY9asWYwbN46kpKR801FrNBqGDRvGrl27lIDm4Q93/blNnz6dq1evMnnyZKP1EBAQQGBgoMGg8KLOAmOs/7KenZ0d06ZNY8KECRw/fvz/1lZ56PijRo1iwIABaDQao/kWlPfs2bP53//+x759+wymOc+revXqzJw506D7mEajYciQIdy/f58VK1awb98+9u3bly+t/vVt377943/OqZR/Xhj5ghdCvCwK+j4VT1e+6Zdfe+01
li1bxl9//cXGjRs5ePAgly5dIjs7GxsbG1xdXenQoQOdOnUqdGzM22+/DeQuthYbG4utrS02NoZTdarVarp164a3tzdhYWFs3bpVGdTs4OCAl5cX/fv3p1atWoW++L169WLUqFGsXLmSnTt3cu3aNczNzWndujUBAQE0aNDgqS7uWFQNGzakTp063Lx5k/bt25eYIEbP0tKSSZMm0b17d1atWsWBAy2KkrcAAAI8SURBVAeUQKJmzZr4+vrSo0cPHBwcjNa/hYUF//nPf7C2tmbjxo0kJSVRs2ZNg5vMMmXK8Omnn9KiRQuWLFnC0aNHycjIwNHRkS5dutCzZ080Gg07duzgxo0b/PXXX6Smpj6TsTgPK6hsbm5u+Pn50bNnT27evEloaKiyXsvDSsI1XKZMGT766CPeeustVq1apaSH3NexXbt2RtcDMhagFOWD19jf7dq1w9XVlZCQEHbt2sXt27extbWlVatWvP/++9SoUYPo6GguXrxoNA+NRmNQj7///nu+euzXr98j67EogygLOteaNWuyYsUKdu7cycaNG5XrwcHBgbZt2yrnUVjgUlDer776KiEhIRw5coTVq1dz5MgRkpKSlFkiu3XrVuAgfSsrK+V9unr16nzv03bt2hX6Pn2U/0ujyu1e9oLIAFghxMtIPteeLZWupA3eKKKwsDDGjRsH5LYANGnS5AWXSIjikWtYlARTpkxh6tSpUPdd8Jv26ARP27zm8CCRyMhIPD09n//xhRBClFrPv5lCCCGEEEIIIZ6QBDJCCCGEEEKIUkcCGSGEEEIIIUSpk2+wvxBCiH+h6D2wvPvzP+6D/OsvCSGEEEUhgYwQQojcgEKCCiGEEKVIqZ21TAghxJO7cuUKV65cedHFoGHDhsVezFMIIcS/mwQyQgghhBBCiFJHBvsLIYQQQgghSh0JZIQQQgghhBCljgQyQgghhBBCiFJHAhkhhBBCCCFEqSOBjBBCCCGEEKLU+X8FBTEJwAMl0QAAAABJRU5ErkJggg==) - `Hardware Accelerator > GPU` ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAlcAAAI5CAYAAACMxR+uAAAgAElEQVR4nOzdd1gU1+I+8HfZRYogCiogoBRBEVSwYC/ERIzXii3GaxQ1IUUTTXITNd5EjfGbaIzXFGOPxKiJLRbU2AALqFGwIDZEughIkw5bfn/w2wkDCyy4Cuj7eR6fR2ZnZs+U3Xn3nDNnJH5+fqrx48fDy8sLLVu2BBERERFp79GjRzh37hx27twJmUwGyZ07d1QuLi71XS4iIiKiRu3WrVtYtmwZJCqVSlXfhSEiIiJ6Hhw8eJDhioiIiEhX0tPTGa6IiIiIdEmvvgtARERE9DxhuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuCIiIiLSIYYrIiIiIh1iuGqAVqxYAUdHRzg6OmLu3LkoLCys7yLV2fO0LURERNpocOGqsLAQc+fOFS7IAwYMwN27d2tcbvfu3aJl7t279wxKS6Q7ycnJ+OyzzzBjxgyEhYVBpVLVd5GIiKgOGly4qig5ORmbNm1ijQc910pLS7F+/Xrs3LkTISEhWLx4MZKSkuq7WEREVAcNPlwBwP79+3HhwoX6LkalWjU2c73YsrKy4OvrK5wPK1asqPN8SqUSBQUFwt+lpaWQy+VPrexERPT0NIpwJZfLsXbtWqSmptZ3UYieCgMDA7z22mvo0KEDDAwMMHXqVLRt27a+i0VERHUgq+8CaCs8PBx//vkn/P39IZFI6rs4RDrXo0cPHD16tL6LQURET6hR1FypBQQEICoqqr6LQURERFSlBh+u2rdvD2trawBAamoqNm7cKOqbUlsqlQp3797FZ599hkGDBsHR0REuLi4YNWoUNmzYgPT09ErLXLx4EY6OjnBzc8PBgweF6QcPHoSbm1u1/W0SEhKwbNky0XuNHz8eW7duRVZWllZlLikpwaFDhzB+/Hi4uLgI69izZ0+N+6Iu26tJUlISVq5ciVdeeUXoO/TKK69g5cqVSEhIqPOdbWlpaZg8ebKwziFDhuD27du1WkdJSQlCQkLwzjvvwNPTE46OjnB1dcWkSZO02s9KpRJhYWGi5T09PfHOO+8gJCQEJSUlovnVd6Z2794dV69eFaavW7dO2I7du3drPZ9a+WErfH19ReVWn4PlX3v06BFWr14tHFdPT0/MmTMHN27cqPZ41HQ+/fLLL1r1I9u6dauwDkdHR/Tq1avKfUZE9CJp8M2CnTp1Qv/+/bFw4ULI5XIcPXoUPj4+GD58eK3XVVBQgO+++w6//vqrqLOwXC7HjRs3cOPGDfz8889YvHgxRo4cCT29umfPkpIS/Prrr1i1ahWKi4tF7xUREYGIiAhs3rwZ3333HXr27FnlerKysvDxxx/j1KlTounqdRw6dAhff/21EEB1vb0KhQIBAQFYuXKlaDsAICYmBj///DM2btyIjz76CH5+fmjSpInW+ygtLQ3/+c9/cPHiRQCAmZkZvvzyS3Ts2FHrdaSkpGD+/Pk4e/asaHpxcTEuXbqES5cu4eeff8aqVavQr1+/Sk3Kqamp+PLLL3HkyBHR9JycHBw7dgzHjh3DK6+8guXLl8PCwkLrcj1t586dw7Jly0ThOCcnB4cPH8axY8ewfPlyjBs3rtL2ZmRkYOHChThx4oRoevnzydjYuMr3ValUCA0NxUcffVQpmKenpwv7bMCAAVWel0REzzvp4sWLF9d3IcqTy+U4deoU7ty5AwDo0KED/Pz88OjRI9y8eRNKpRLx8fHw9vaGqampsNzNmzdx8uRJAECzZs0wZswYmJubC68XFBTg888/x44dO6BUKgEAMpkMzs7O0NPTE2qAiouLcerUKbRp0waurq6QSCQoKSlB8+bN0a1bNzx+/BgZGRkAABcXF0ycOBF9+/aFu7s72rdvD6AskPz000/49ttvoVAoqnyv3NxchIeHo2/fvqILd2hoKC5fvgygrOYrNja2yv2VkJCA7Oxs9O/fH/r6+jrZXjWFQoEffvgBK1euFLYDAJycnGBiYoLHjx8DKKv5CQ0NhZ6eHnr27CkKaeW3pUOHDnj55Zehr6+PgoICLFu2DMeOHRPKtmTJEgwdOlTrPnW5ubn47LPPEBwcLKzDw8MDI0aMgIGBAR4+fCjchRcWFobevXu
jdevWwvJpaWmYO3cuQkJChGlmZmZwcnLC48ePhW2+f/8+MjIyhH1cWFgIa2trdOnSBcnJycjPzwcAdO/eHRMmTECvXr3QtWtXtGjRQqv52rRpU2lfWVlZYfTo0TAyMgJQNiTJ3r17AQAPHz7EX3/9VWWtpVKpxL179+Dt7Q0zMzNhekFBAb788kscPny4yn2akJCAmJgY4e8ePXqgX79+wt9RUVGYM2cO0tLShP01ePBgDBo0CNnZ2cjOzhbWk5SUhEGDBsHAwKDK9yMieh41+GZBADA0NMQbb7wBS0tLAGVf8Dt37hRd8GsSEhIiatJ78803ER4ejqNHj+LChQs4evQounbtCqAs4K1Zs0a4yKiHXZg9e7aoVqVjx46YPXs25s6dCx8fH2H6xYsXsXnzZgBlF/xPPvkE169fF97r5MmT6N69OwAgNjYWmzZtqrYZxdvbG0FBQYiJicH9+/dx/vx5jBw5Unj94MGDooDwpNurdv36dQQEBAh/jxw5EufPn8eJEydw+vRphIaG4pVXXhFe37x5s1ALVR2FQoENGzZg3759wj5atmwZfH19a3WzQkREBIKCggCUXeQ3bdqE3bt3Y8GCBdi+fTsOHjwIBwcHAGU1VAcOHBDOGYVCgd9++00or42NDbZv347w8HAEBgbixo0bWLt2rRBO9u3bJ9Qe9ujRA3PnzsXbb78tqpnp2bMn5s6di7lz56JHjx5az1cXNjY22Lx5M6Kjo3H//n3cvHkT7733nvB6bGwswsPDRctcvHhRdE6UP69iYmIQFBQEb2/vKt9ToVDgwIEDwl27vXr1wpEjR/Dzzz9j0aJFOHHiBL7++mvIZGUV4kFBQYiIiKjT9hERNWaNIlwBgJubG6ZNmyb8vW3bNly/fl2rZR8/fozff/9daBobOXIkPvjgA6HmSyKRoEOHDvjmm29gY2MDoKym4NixY7XuS1RSUoIDBw4ItRRTp07Fm2++CUNDQ+G9HB0dsWDBAjRt2hQAEBYWhoSEhCq3e9myZbC3txeCh6WlJRYuXCgENLlcjnPnzqG0tFRn21taWordu3cjJycHQFlty8KFC4WACwDW1tZYunSpUI78/HwcOHCg2qCoUqmwd+9erF27Vpj27rvvYty4cbVuhk1LSxO20cHBAZ07dxaFM3VNmVpCQgKKioqE7VXX4MhkMixduhR9+vQRyiCVSuHj44NZs2YJy586dapBjGvWtGlTfPXVV/D29oZUKgVQ9gPk3//+Nzp16iTMFx0dLfy/pKQEf/31l7C/Kp5XEokE9vb2WL58uXA8KyopKRE1BXp6eopCo56eHoYMGQJXV1cAZefl/fv3dbfhRESNRKMJVxKJBOPGjYOXlxeAsv4l69evR25ubo3LJiYm4tatW8LfY8aM0divxMnJCcOGDRP+vnjxolbrLy81NRVXrlwBUHbRHjp0qHABLK9du3ZwdHQEUHahVzezaCpT8+bNK023tLQU1TJERUUJzXS62N7MzExReB0+fLgoWJUvx+jRo4W/r1y5Uu14ZGfPnsWXX34pXOR9fX3x1ltvadxHNTExMRH+Hxsbi8jISFEYlkgkWLBgAe7fv4/79+9j/fr1QqCNiooSmlu7dOki1OKVJ5FI4OnpKXqPJ7mZQleaN28uNCWW16xZM6FpuqKsrCzRnbbDhg2DlZWVxnWoA3dFMplMdB5duXIFKSkponksLCxw4MABYZ/7+flptU1ERM+TBt+hvbxWrVrB398fERERkMvlCAoKwokTJ+Dr61vtcgkJCUI/KRsbG9jZ2WmcTyqVolu3bkKTXmJiIjIyMtCsWTOty/jgwQPhuYZyuRyTJ0/Warnk5GSt30NNXUMAlIWhrKwsWFhY6GR7U1JSEB8fL8xbvkakInd3dzRt2hT5+fmIi4tDamqqxvf8+++/ERoaKtTqeXh44D//+U+1Hair4+HhATc3N0RFRSEnJwfTp0+Hg4MDRo0ahX79+sHFxaXKYxcZGSn8PyIiQqvmufL7uLF5/Pix6O5DT0/PWo8Xp6+vj1deeQV79uyBXC7HxYsXMWjQIPTs2ROjRo1Ct27dYG9vL+r7R0T0Imo0NVdq/fr1w8SJEwGUhZcNGzZU2aSmlpeXJ/y/VatW1V4cy3eCl8vlterXBZRdgJ+VqkKJLra3uLhYCEE2NjbVrsPExESoXatunz18+FAIfUBZTVDFfl61YW1tjZUrV8LFxUW0zjVr1mDixInw8PDApEmTcObMGVGZSktLhY7XL4rMzExRLVNdagoBYNCgQVi+fLnQSV0ul+P8+fNYsGABfHx80KVLF8yfPx9xcXF88DQRvbAaVc0VADRp0gRvvPEGTp8+jeTkZNy9excBAQFwcnKq76IBEAebpk2bYuzYsaIAUxV1x+sXSU5ODlavXg0nJyeNTY7a6NixI/bv349jx45h8+bNuHHjhuj1S5cuYfr06fD19cVnn32GFi1aQC6Xi5r3XFxcMGTIkBprXAwMDER3qL6I9PT0MH78eAwYMAA7duzA77//LuqHVVxcjF27duHAgQP473//i0mTJtU5yBERNVaNLlwBgLOzM2bNmoUlS5YAKLuTS1OfGbXyfXOysrKQnZ2NFi1aaJz3wYMHwv9lMlmtLwwVnwfn6+sLDw+PWq1DW1X1/9HF9hoYGAhNfampqdXW9GRnZwv9rGraZ+rhK3744Qfk5OQgPDwcGzZswMKFC+t8ETY0NMTo0aMxevRoFBYW4s6dOzh9+jR27dol1Nbs27cP7du3h7+/P4yMjER9llq3bg1/f/9aNf82NhYWFrCxsalT87MmlpaWmDdvHubOnYusrCxcu3YNR44cQWBgIIqLi1FcXIyVK1fC1dVV1G+NiOhF0OiaBYGyjsajR4/GgAEDAJTVgJw5c6bK+a2trYWOzPHx8YiLi9M4n0KhwM2bN4W/7ezsat2/xsTERFgmPz8fERERT615pHyndXNzcyFA6WJ7LSwshDvB5HK5qI9SRbdv3xY6qNvb21dZC+Xg4IAffvgBfn5+8Pf3F6Zv27YNBw4cqHF7y1MqlcjOzkZWVhaysrKEOyWNjIzg4eGBDz74oFJ/vFOnTgl3P7Zq1UqYfuvWrSr30fPC1NRUNOZV+eOurdLSUmF/Z2dnQ6lUQiKRwNzcHN7e3li5cqVo+IucnBxhDDIiohdJowxXQNkdUzNnzhRCRHXs7e1FNVv79+/XWOsTGxuLv/76S/i7V69etW4GcnBwEHWO3r59u9DBvaKCggJcvHix2n5dt2/f1viImtTUVNGFy83NTah50cX2Wlpaimocjhw5ovEuwPT0dAQGBgp/e3p6VhmuOnfuDFtbW0gkEkyZMgVDhw4FUBbefvrpJ9HQATXJycnBjBkz0L17d3Tv3l0YYLM8Y2Nj9OrVS/hboVAIQbdPnz5COTMyMvDrr79WWROYkJAgDGrbWJmbm6NLly7C31Udz8ePH1dZuxURESHs73/9618a+8vZ2tqic+fOwt/lnwxARPSiaLThChB3bq9O8+bN4evrKwxueOjQIaxZs0Y0zEJcXBwWLFggXF
hsbGzg4+MjuqPK0NBQFBxiYmJEHbSBsgv6yJEjhfeKjY0VBhFVX9iVSiVu3bqFOXPmYOrUqdiyZUuVAevu3btYuHAh7t+/LyyfmpqK5cuXC4NEymQy+Pj4CH2GdLG9TZo0ga+vr1DbER4ejuXLl4suyI8ePcKyZcvw999/AyjrYzZ69GitHoFjamqK2bNnC/szNjYW69ev13qog2bNmonGY9q4cWOlZxLm5eXh/Pnzwt+WlpZCR2z1cwzV9u3bh//7v//Do0ePhGlFRUX466+/MGXKFMyaNQuXLl0Srd/AwAC2trbC3zExMULNWF3me5r09fXh4+MjnBPh4eFYuHChcDOISqVCXFwcFi5cWGnwUTV7e3shOKWkpODnn38W9TEEympKy9dyahoygojoedco+1ypSaVSvP766wgJCan2ETFA2bg+YWFhwqjgGzduxC+//AInJycUFBQgMTFRmFcmk+GDDz6o1EleIpGIxhGKiorCwIEDAQBvv/02PvnkEwCAj48P3n33XXz//fcAgGvXrmHMmDFo1aoVrKyskJCQILq4BgUFYfTo0aJHs5QXFhYmGgyzolGjRlV6PqEutrdHjx6YNm2asB2HDh3CoUOHhPkq1lzMnDlTVFNUEzc3N8ybNw/z588HUBZwvLy8MGHChBqHCZBKpZg4cSKCgoIQGxuL2NhYjBo1Ch07doSXlxeSk5MREREh1PrJZDKMHDlSuMOySZMmeP/99xETEyOM0r59+3Zs374dTk5OkMlkiImJEdW8hIaGolu3bkLfMCMjI9GQEydOnBBq+7755htMmDChVvM9bV5eXhgzZgz27NkDAAgODq5Vs13r1q0xdepU4Tmf+/fvR3BwMLp27YoOHTrg6tWruH79uvAMShcXF+HzQUT0ImnUNVdAWQ1E+f47VTE2NsbSpUsxY8YM4de7XC7HnTt3REHDzMwMK1eurPJRLIMGDaoxQEilUrz99tv48MMPhfcCyprQIiMjRcHK19cXa9eurTJYTZ48udpxvAYMGIBPPvmk0rAMutheqVSKOXPmYNGiRaLnw6kfl6Imk8nw6aef4t13361Vp3SJRIIRI0aItm/16tWiwS6r4+zsjNWrVwtDMagfSL1lyxYcO3ZMCFYGBgZYvHix6BFFQFlY+Pbbbys98iUmJgZ37twRgpWBgQGWLVuGOXPmiLZPIpFgzJgxNd7pqe18T5uRkRE+/fRT0SOLKurVq1eVN2BIJBKMHTsWixcvFs4HdX/HjRs34tKlS6JgtWLFiko3eBARvQgafbiSSCR49dVXhf471TE2NsZnn32GwMBATJ48WahNkMlkcHd3x/z584VapKoexaK+IL/22mtCk5m6Rqo8Q0NDzJ49G0FBQfjggw9EzyS0s7PDhAkTsH//fqxYsaLKO/mAsua65cuXY82aNejWrRtkMhlkMhm6deuGFStW4Oeff64ymOlie6VSKWbMmIETJ07gnXfeEdVuOTk54Z133kFQUBD8/f21ag7UVEZ/f3/RMwC/+uqrKkesr6hLly7Yv38/1q9fj4EDBwrHRL2N6o7tr7/+usbgZ2Njg40bN+KPP/7AhAkThI7u6uU//PBDhISEVLm8s7MzfvjhBwwbNkwIHJpuhNB2vqfNwsICP/zwg8bzac2aNVi3bl21gUhdW3zmzBnMnz8f7u7uQng3MzPDwIED8e2332Lfvn2iPl5ERC8SiYoj/RHR/5eRkYEZM2YI/aY+/vhjvPvuu/VcKiKixqXR11wRkfbS0tIQEhICpVKp8fXr16+Lhvgo/4glIiLSDsMV0QuisLAQ3377Ld566y18/fXXomcNKpVKXL58GcuWLRP6mnl5ecHd3b2+iktE1GixWZDoBREeHo63335bNHyIk5MTTExM8ODBA9F4amZmZvjpp5/Qt2/f+igqEVGjxnBF9IJQqVS4dOkSPv30U8THx1c5X6tWrbB8+XK89NJLNQ6JQURElTFcEb1gioqKcP78eezatQsXLlxATk4ODAwM0KVLF4wYMQJjxox54R9QTUT0JBiuiIiIiHSIHdqJiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHZM/iTTLtnJ/F2xDRc8A8Mbq+i0BE9ERYc0VERESkQwxXRERERDrEcEVERESkQwxXRERERDr0TDq0a8JOq0TEm12I6HnEmisiIiIiHWK4IiIiItIhhisiIiIiHWK4IiIiItIhhisiIiIiHWK4IiIiItIhhisiIiIiHWK4IiIiItIhhisiIiIiHWK4IiIiItIhhisiIiIiHWK4IiIiItIhhisiIiIiHWK4IiIiItIhhqtaunfvHgYMGIAVK1bUd1EIwMWLF+Ho6Ijdu3fXd1GIiIgANMBwpb5YVhdeCgsLMXfuXAwYMAD37t17hqUjIiIiql6DC1dEjRVr0YiICGC4IiIiItIphisiIiIiHXruwtXjx4+xYcMGDB8+HI6OjnB1dcW7776LmzdviubLysqCr68v5s6dizNnzmDUqFFwcXHB5cuXAQAqlQphYWGYNGkSHB0d0atXL6xevRqZmZmi9aSmpmLEiBH4+OOPUVxcLExXKBRYsmQJhgwZgoSEBNEywcHBlZqPtC23ukP9N998g8OHD2PIkCHw9vZGbGysME9CQgIWLlwIT09PODo6YtSoUTh06BBKSkpq3H8qlQrh4eGYM2eOsPyQIUOwZcsWFBUVVZq/pKQEhw4dwvjx4+Hi4lJluWs7r1KpxNmzZzFlyhTRvDdu3IBKpapxO7TdDzXtT22Oi7o5cPLkyQCATz/9tNLxrbg9Li4umDJlCs6ePQulUikq9+7du+Ho6IigoCB8/fXX8PT0rHR+ERFRw/Vchau0tDT4+/tj9erVcHFxwfvvv48pU6bg2rVrmDJlCi5evFhpmb///hsfffQRUlNT4erqCj09PahUKuzduxfTp0/H3bt3MWXKFIwbNw7Hjx+Hn58fUlNTheXNzc3RpUsX3L59G9nZ2cL0R48eISIiArGxsYiKihK955UrV2BjYwNPT886lzswMBCLFi2CXC6HnZ2dMD0sLAwTJkzA6dOnMW7cOLz33nswMjLCBx98gLVr10KhUFS5/9TbPXnyZNy7dw+TJ0/G+++/D3t7e3z99df4/PPPUVhYKMxfWFiIb775Bh988AGkUin8/f0xefJkhIeHY8qUKQgLC6vTvAqFAj/++CNmzpwJuVwOf39/YX9MnToV58+fr3Ib6rofNO1PbY+Ls7Mztm3bhkWLFgEA3nzzTWzbtg29e/eutD2ZmZmYNWsWZs2ahczMTMycORM//vijxuPy9ddfY8uWLbCyskKzZs0qh
TAiImqgVM9Ahm37Sv+qcuHCBZWDg4NW//r376+Kjo4Wlv3tt99UY8aMUZ0+fVq0zsjISFXv3r1VCxYsUJWUlKhUKpUqMzNTNXbsWJWzs7Pqjz/+UCkUCmH++Ph4lY+Pj+qll15S3b17V5heXFysWrVqlcrBwUH1zTffCNN37dqlcnZ2Vl24cEG0HZ06dVJ16tRJ9L55eXmqt956S/XWW2+p8vLyal3u6OhoVf/+/VUeHh6q06dPq5RKpTB/WlqaatKkSaq3335blZmZKUyXy+WqFStWqNzd3VXh4eFV7vsHDx6oZs2apVqwYIHq8ePHouWXLl2qcnd3V125ckWYfvjwYZWzs7Nq1apVquLiYmH6/fv3VcOGDVNNmTJFlZWVVet5Q0NDVe7u7qqtW7eq5HK5MO/Dhw9V48ePF82rPl927dpVp/1Q3f6szXGpqizlt+ejjz5S5efnC9Nzc3NV8+bNUzk7O6tCQkKE6bt27VI5ODioXnvtNdWDBw80HKnnR22+G4iIGgtZfYe7qnTv3h39+vXT+FppaSlOnTqF/Px80fQpU6ZgypQplea3sbGBtbU1CgoKIJfLoa+vL7w2bNgwjBgxAnp6/1TiRUVF4e7du1i6dCmcnZ2F6U2aNMGoUaPw559/itbv6ekJS0tLhIeHo1evXlCpVDhz5gz69++PTp064cSJE8jMzISlpSXS0tJw9+5dTJgwAU2bNq1zuSdOnIgBAwZAIpEI0y5fvoy///4bmzdvRosWLYTpUqkUw4cPx65du3Dt2jV069ZN4361trbGxo0bK02XSqXo2LEj8vPzhaapwsJCHD9+HHZ2dhg/fjyaNGkizO/g4ICJEydi7dq1SE5OhoGBgdbzNm3aFIGBgWjXrh2GDRsGqVQqzGtpaYnhw4cL8zZv3lzjdtRlP2jan3U5LhWVlpYiMDAQJiYmmDVrFoyNjYXXTExM8NZbb+H8+fM4duwY+vbtK1rXnDlzYG1tXeW6iYioYWqw4apnz56YO3euxtcKCwuRnJyM8PDwSq9lZWVh9+7d2L9/P27fvi16rW3btpXm19PTE11QASAyMhIARMGqOtbW1ujUqROuXbuG/Px8lJaW4tq1a+jbty8GDRqE3377DTdv3oSlpSXi4uKQnJyM7t27P1G5pVJpleWeOXNmlWVNT0+vdluUSiXCw8Px66+/4ty5c8jJydE4X1FRERISEtC+fXtYWFhUet3Pzw9+fn7CttVm3tu3b+PmzZvo06dPleXMy8ur8rW67AdN+1NdntocF03lvH37NpydndGmTZtKr9vZ2QnNynl5eZXCIBERNT4NNlzVRWxsLN555x1kZWVh8uTJmD9/PqRSKXJzc7Fy5cparcvGxkZjENCkadOm6Nq1K3bv3o20tDTk5OQgOu+4zxcAACAASURBVDoaH3/8Mezt7dGtWzeEhYVh4MCBuHz5Mrp06SIKbrosd9OmTTF27FiYm5trfN3V1bXKZRUKBdatW4c1a9agT58++PLLL4WL/ZkzZzTWahkZGYlq/apTm3ldXFwwZMiQKmuFWrVqVe3yT7If1HR5XMzNzTVui56eHoyMjGq1LiIiatiem3ClUqmwZ88eJCcnIyAgQNTkk5WVBTMzM63XJZPJkJqaKuqgXpPu3bvjf//7H+Li4nDr1i04OzvD3t5eCF5BQUFITk7GvXv30LlzZ6E8uiy3mq+vLzw8PGq9XGxsLH7//XeMHTsWX375paj57sGDBxqXUSqVWt+9V5t5W7duDX9/fzRr1kyr+TWp634AdH9cMjMzUVpaWilIlZaWVroDlYiIGrfn5m7BoqIiJCcnw9nZGQ4ODqLXlEpltXfJVeTq6gq5XF6pGag6zs7O6NKlC06ePImIiAh07dpVuAD37dsXMTExOHXqFG7evAkvLy+hyUeX5XZwcEB+fj5CQ0O1DjHlZWRkIDk5GT169BAFKwCVymFoaIi2bdsiKioKjx49qrQu9ZAL169fr9O8V65cqfOjjZ50PwC6Oy4mJibo2LEjoqOjNQbUBw8eIDo6Gh07doSJiUmdykpERA3LcxOuZDIZjI2NER0dLfS5AcrGVgoICBBNq4mbmxtcXFzwyy+/IDo6WrSugwcPIjk5udIyZmZm6Ny5M3bu3IkzZ85g4MCBQh8eR0dHdO7cGVu3bgUg7suly3L36tULLi4u+O2333Dp0iXRaykpKfjmm29Ew0hUZGBggKZNm+LcuXMoKCgQpt++fbtSk6CRkRGGDh2KxMRE7Nu3TxQ2UlJSsGPHDiEo1WXe4uJi/Pjjj8jIyBDmValUOHv2LH755Zdqx+x60v0APNlxKd8fTF9fHz4+PsjIyMCmTZtE+7WgoACbNm1CRkYGfHx8qu0YT0REjcdz0yyor6+P0aNH4+jRo/D398fAgQPRunVrHD9+HHl5eZDJZEhISEBRUVGNfVzs7Owwa9YsLFy4EBMnTsSIESNgamqK4OBgJCQkQCarvNukUim8vLwQEBAAV1dX2NvbC681a9YMvXv3xrlz5zBq1CjY2Ng8lXK3bdsWn3/+Od577z1MmzYNPj4+sLe3R0ZGBgIDA2FpaQlfX19YWlpqXL5Dhw549dVXsWfPHty4cQMDBgxAdHQ0Ll26BAMDAwBlA3P26tULADB48GD8+9//xvfff4/z58/Dy8sLhYWFOHr0KORyOVavXi3c0VebeX18fPDuu+/i+++/x6hRo/Dqq6/CxMQEkZGROHv2LHx9fSGXyyvVrulqP9T1uFhYWMDGxgbr1q1DYmIiPDw8MGrUKPTv3x+zZ8/Gjz/+iKioKHh7ewMoG0w2JiYGs2fPRv/+/as9tkRE1Hg8NzVXAIRw4+XlheDgYAQGBmLIkCE4ePAgRo0ahfT0dFFNSFUkEgnGjRuHrVu3wsXFBdu3b8fevXsxdOhQ7NixQxScynNzc4ODgwO6deuGli1bil7r3bu30P+qYkjSVbmBsibIAwcOYOzYsTh9+jS+//57nDlzBm+88QZ+++23au+ANDIywuLFi/HJJ58gLy8Pv/76K3Jzc7Fq1Sps2LABMpkMcXFxQlObsbEx5s+fjzVr1kChUGD9+vXYu3cvBg0ahN27d6Nv377Cumszr1Qqxfvvv4+AgADY29vj119/xdq1a5GTk4NVq1Zh6dKloiENdL0f1Gp7XJycnDB//nzIZDL89ttvSExMFLZn9uzZ2Lx5M8zNzbFp0yZs2rQJ5ubm2Lx5M2bPns07A4mIniMSVV07pdRCpl3lC5l5YrSGOYnoRcLvBiJ6Hj1XNVdERERE9Y3hioiIiEiHGK6IiIiIdIjhioiIiEiHGK6IiIiIdIjhioiIiEiHGuwgovHx8YiKikJpaWl9F4XomdPX14ebmxvatWtX30UhIqJaanDhKj4+HosWLUJgYGB9F4Wo3o0YMQLLli1jyCIiakQaVLNgfHw8RowYwWBF9P8FBgZixIgRiI+Pr++iEBGRlhpUzdWiRYuQlJQEAOjfvz8mTZoEExOTei4V0bOXl5eHP/74A+fOnUNSUhIWLVqEbdu2PfX3jYtTYuvWYpw+
LUdcnBJxcUrY25f9BrO314O9vR6mTTPA4MEN6quDiKhBaTCPv4mPj4eHhweAsmB16NChp10sogZv5MiROHfuHADg6tWrT615cPHiQixZUqT1/GUhqwkWL67+YeI14eNviOh51GCaBaOiooT/T5o0qR5LQtRwlP8slP+M6EpIiBwSSVatghVQVsO1ZEkRHBxysHhxoc7LRUTUmDWYcFX+rkA2BRKVKf9Z0PWds4sXF8LbO1c0zd5eD198YYjgYFPExppBpWqB2FgzxMaaITjYFNOnNxGaCYF/QhYDFhHRP9hxgugF5O2di5AQufB3dc185ftcDR4sE/plBQSUIC5OCQBYsqQI8fFK/PJL02ezAUREDViDqbkiomdj8eJCUbAaPFiG2FgzrftP2dvrYfFiIwQHm+KLLwyF6Vu3lrAGi4gIjThcJSUlYeXKlRg0aBAcHR3h6uqKadOm4dSpUygpKak0/+7du+Ho6Kjx3/Dhw7FlyxYUFBRoXObixYtVlmPFihU1zkPUUISEyEX9q6ZPb4LgYNNK86mbDCWSLDg45MDPL79ScLK318P06QaigLVkSRG2bq38+SMiepE0ymbBsLAwzJs3D1lZWejZsydGjRqF7OxsHD9+HG+++SYmTpyIzz//HMbGxpWWHTJkCNzc3IS/8/LyEBwcjGXLluHQoUNYtWoVHB0dn+XmED0zfn75wv8HD5ZVasaLi1PCzy9fVLNV1gxYFpgCAkoQHGwqaiqcPt0A8fH/zLNkSSGmT2/ytDeFiKjBanThKiYmBv/9739hYmKC9evXC8M3AMBnn32G//3vf9iwYQPs7e3h7+8PiUQiWn7o0KGYMGGCaNqCBQuwc+dOLF26FFu2bMHnn3+OJk14caDny9at//SRAqCxxqpisKpIHb7KL1vWCd4IISH/jI21eHHhEw/TQETUWDWqZkGFQoHff/8diYmJ+Oijj0TBCgAMDQ3h5+eHzp074+TJk8jKytJqvVKpFKNGjcLAgQMRGhqK5OTkp1F8esGsWbNGGBS3OklJSdizZ89TL8+SJf8062nqeL51a4koWH3xhaFwt2D5pr+QEHmlpj91wFILCGDTIBG9uBpVuMrNzcXVq1fh4uKC7t27a5ynZcuWmDlzJry9vVFYqH3n2mbNmsHFxQXx8fFIS0vTVZHpBbVmzRqsWbMGr7/+erUBKykpCZ988gk++eQTrFmz5qmVp2KtlaZmu9On/xnq4YsvDIWaJ3UH9vKBLCCguNLygwfLhJHb4+KU1daAERE9zxpVuHr06BEePnyI9u3bo1mzZhrnUddCvffee7CxsdF63SqVCgqFAk2bNoWBgYGuikwvqHHjxsHW1hZJSUlVBix1sLpw4QJsbW3xwQcfPLXylA9OVfWHKh+Gpk+v/Bko/8ib8kFNzd5eD4MG/TNPSIhux+UiImosGlW4UrO0tIShoWHNM9ZCWloaIiIi0K5dO1hbW+t03fTisbW1xY4dO6oMWBWD1ZkzZ55qecqHoUGD9DXOox40VKVqIRooVK3i4KGaDB78z7pPn2bNFRG9mBpVuMrIyEBycjKkUqmoo/q9e/cwYMCASkMsaDM8gkqlQkJCAr788kuEh4dj6NChaNmy5dPcDHpBVBWwnnWwAsRhqK4PXS4/FENVtV/aBDAiouddo7pb0NzcHNbW1igsLIRCoYBUKgUAmJqa4vXXX0dxcVk/kNDQUISHh2tcx6effopPP/200nSZTIYZM2Zg1qxZwnqJnpQ6YKmD1euvvw5bW9tnGqwAcdDRVCtVk4oPdp42jU3nRERVaVThSiKRQE9PDxkZGSguLhbGsbK0tMQ777wjzFdSUlJluKo4zhVQdgHs06cP2rRpU2noBqInVTFgJSUlPdNg9aQqPionONi0ytov1lwRETWycNW6dWs4ODggKioKjx49Qtu2bWu9Dk3jXNUkMzNT43SFQoHCwkJYWFjwYdNUrfIBC8AzD1b29npC2ImLU2pde1U+WNnb6+GXX5pW26z4pDVkRETPg0b17Wdqaoq+ffsiNjYWwcHBUKlUT/X91H2vbty4ofG9cnNzER0dDUtLS/bTohqpA1Z91FjVtUapfI1VTcGq4vwMV0T0ompU334SiQS+vr7o1asX/ve//+H48eNQKsUXipSUFFy5cgUymeyJ+065u7vDy8sLO3bswNmzZ0UBq6SkBFu2bEFYWBheeuklhivSiq2tbb28b/khEjSNUaVJ+YFCp09volVH+PJDPjBcEdGLqlE1CwJlTYNLly7FnDlz8M4778Dd3R39+/eHVCrF9evX8ffff0OhUOA///kPPD09n+i9WrVqhY8++ggffvghZs2ahZ49e6Jbt24oLCxEcHAwYmNjMXLkSPj5+bETPDVogwfrCx3StR3cs2zUdUNheW2UXzc7vRPRi6rRhSsAcHZ2xr59+7B3714cOHAAmzZtglwuh5OTE6ZMmYKJEyfC2dlZJ53Te/bsiT///BPbtm3D/v37cf78eRgYGMDLywvz5s3D0KFD+RxCavAqDgC6dWtJjQ9XLj/iujYqjgJf1yEfiIgau0b77WdsbIypU6di6tSpWs0/YcKEWndkV2vZsiXmzZuHefPm1Wl5oobgl1+aws8vH0DZcwZrClflh18o/zicqtT07EIiohcFO0UQvSAGD5YJ/aDi4pRC0NIFb+9codbK3l6vxuBGRPQ8azDhSl//nz4deXl59VgSooaj/Geh/GekLsr6UP1T+7R1a4lo1PW68vPLr3RXIRHRi6zBhKvyA3v+8ccf9VgSooaj/Geh4uC3dTF9ehOhkzoALFlSBG/vXI3zLl5sJDxrsKomQW/vXNFdhV98Yci+VkT0wpOonvZgUQAy7ZwrTTNPjK40berUqQgMDAQA9O/fH5MmTeLgnPRCysvLwx9//IFz584BAEaMGIFt27bpbP0VH2djb6+HadOa1NivqqrlgbJmx+Bg01qVQ9vvBiKixqRBhav4+HiMGDECSUlJT7tIRI2Gra0tAgMD0a5dO52uV1NAsrfXw+DBMkybZgB7e71KI7uHhJRWWgbQrsO7JgxXRPQ8alDhCigLWIsWLRJqsIheZCNGjMCyZct0HqzUQkLk8PPLr/NzALV5JE51GK6I6HnU4MKVWnx8PKKiolBaWlrtfETPI319fbi5uT21UFVe2bhXxRprpKpS22bEqjBcEdHzqEGFqxs3bsDd3f1pF4eo0XlWnw110Dp9Wo64OKXoIc/29noYNEiGwYP1ddZpneGKiJ5HDSpcEdGLhd8NRPQ8ajBDMRARERE9DxiuiIiIiHSI4YqIiIhIhxiuiIiIiHSI4YqIiIhIhxiuiIiIiHSI4YqIiIhIhxiuiIiIiHSI4YqIiIhIhxiuiIiIiHRINw8IqwNNj70gIiIiauxYc0VERESkQwxXRERERDrEcEVERESkQwxXRERERDokUalUqvouBBEREdHzgjVXRERERDrEcEVERESkQ9LFixcvru9CvMgiIyOxatUqWFlZwdLSsr6LQ7WUnZ2N7du3Y9euXbhw4QI6duyIpk2bPpP3Tkh
IwKpVq6BQKODo6Kj1cjk5Ofjuu++QkJCALl26PMUS6l5MTAy2bNmCZs2aoXXr1lXO96w/V6mpqdiyZQuKiopgb2//1N+PtLd9+3YEBgaiS5cuMDQ0rO/iUAVFRUXYsGEDLl26hM6dO0Mmq7fhN3WqQW3FyZMncfz48Rrnmzp1Kjp37vwMSkRUtdLSUuzduxfx8fEYPHgw7Ozs0KJFiyrnLyoqwvHjx3Ht2jXk5uZCJpPBxsYGvr6+sLa2foYlb7xKS0uRlZWFkpKS+i6KiFKpRE5ODoqKiuq7KFpTn48REREoKCiATCZDmzZtMGzYMDg5OUEikQjzbt++HdeuXRMtL5FI0Lx5c/Tr1w/9+vWDVCoF8M/3uKbv6aKiImzZsgUAMGPGDIYdem41qHDl7u4OCwsL4e8bN27g9u3b8Pb2Fk23s7Orj+JpVJsvi+3btyM+Ph7vvfcezMzMnlURG5TIyEhs27btmQfkp/GlnpGRgaSkJHh5eWHYsGHVzpuZmYktW7YgKysLXbp0Qfv27ZGXl4fQ0FD88MMPmDhxIjw8PJ64TM+7jh074quvvqrvYlRibW2N//73v/VdDK1lZmZi06ZNyMnJEZ2P58+fx5YtWzSej8bGxhg6dCiMjIwAAHK5HBERETh8+DBSUlIwfvx4IWARvegaVLiysrKClZWV8HdGRgaio6Ph4uKCtm3b1mPJiCorKSmBQqGoMagpFAocP34cjx8/xowZM+Dk5CS81qNHD2zatAknT56Ek5MTTE1Nn3ax6QWnUCgQGBiIvLy8Suejl5cXAgICcPjwYdjZ2Yl+1DZp0gRubm6iH4bdunXDnj17EBkZCS8vLzg4ODzTbSFqqBpUuKqtmJgYHDlyBMnJyQDKarS0aWIpLi7G4cOHcfnyZSiVStjZ2cHLywuHDh3ChAkTRDUqKSkp2LdvHxITE6Gnp4cOHTpg7NixaNasWaWq8s8//xxDhw7Fyy+/LHq/is2dX331Fbp27YopU6aIyvTnn3/i8uXLkMvlsLW1xcSJE0X9RUpLS3HmzBmcPXsWBQUFMDMzw6uvvgpPT09RFX7F9z5z5gxGjhyJv//+W9gOV1dXjBs3DsnJyThw4ADS09Mhk8nQo0cPDB8+HAYGBqKyHT9+HOHh4SgoKICxsTEGDBiAgQMHQl9fX5gvKysLBw8exJ07d6BQKGBhYYGhQ4eia9euSExMxKZNm4Rmk23btqF58+bV1uKlpqbi4MGDiI2NhUKhQKtWrTBq1Cg4OzsL26vevlmzZokCePlawsDAQK2Ok5pCoUBoaCjOnj2LnJwc6Ovro3v37hg+fLgQpMof++PHj+P48eNVrjM9PR137txBly5dKvWNatq0KTw9PXH06FE8ePAAHTp0qNU+r0ipVOLq1as4efIkMjIyIJVKRedsxe0MCgrCmTNnUFBQAHNzc4wcORKdOnWq8nyq7ftlZGRg3bp16NChA8aNGyesNy8vD+vWrUPLli0xdepUSKVSrY63pppPTZ9n9X6sjjbnjvrcLP89AAA2NjYYPny4EEwSEhKwadMmDBw4EC+//DJycnLw008/oV27dnB1dcXRo0eRk5MDExMTjBgxQvSZValUuHLlihB4zMzM4O3tjStXrqB58+ai74mKsrKycOzYMURGRqK0tFSr74T09HTExsaiY8eOlc5HIyMj9O7dGzt27EBiYqIoXGkilUrRo0cPXL9+HTExMToJV9oeF/V8EydORHh4OO7cuQOlUglXV1eN53t5SqUSEREROHbsGHJycmBsbIyBAwdi0KBBQu1bxXn09fXRuXNnDB8+XLTums6N8lQqFXbu3Ino6Gj4+/uLKhP27t2LyMhIvPXWW2jTpo1W30Plz7Py50nF87Gi8jX5AwcOxF9//YX09HQYGBhg8ODBov0AiL/b1Z+x8ePHi65P2uxTAIiOjsb+/fuRnp4OY2NjeHl5QS6XPzd9rdQa7dZERkZi586dsLS0xLhx4yCXyxEaGor169fDz88P7dq107icQqHA/v37ceXKFXh4eKBDhw64f/8+9u/fD7lcLpo3Pj4ev/zyi/AeJSUlOH36NDZs2AB/f3/4+vpi6NCh+P333wEAr732msaah/79+6Nnz574888/kZycjGnTplX60jpy5IjQ/yY9PR1hYWHYu3ev0ISlUCiwb98+REVFoW/fvrC0tMTt27exa9cuFBQUoH///lXuq+LiYhw6dAheXl7o3bs3oqKicOPGDSQmJqK0tBS9evVCq1atEBUVhQsXLsDU1FT4QBYVFSEgIAAJCQno06cPbGxscP/+fZw6dQqPHj0SmgJyc3OxefNmlJSU4F//+hcMDQ0RERGBnTt3Ii8vD3369MHHH3+MW7duYe/evRg3bhw6depUZefvxMREbN68GYaGhvjXv/6FJk2a4PLly9i6dWutm9C0PU4AhP0cHh4unB+pqakICwtDWloapk2bBiMjI/j6+qJbt27YuXMnevXqhf79+4sCaXnp6enIz89H+/btNV7w+vTpg65du8LY2LhW+7wilUqFoKAgnDp1Cl27dsUrr7yCrKwshISEYOfOnZg2bZqolu3mzZtIT0+Hj48PACA0NBTbt2/HG2+8gY4dO9a4X7V5P3Nzczg4OCAuLg75+fkwMTEBUBacs7KyhC/euh7vqj7PQUFBNZZfWykpKdi4cSNMTEwwbtw4AMDZs2exefPmGvfVnTt3kJKSgsGDB0NPTw+hoaHYv38/WrZsKQSHa9euYc+ePbC0tMSrr76KgoICBAcHIzc3F82bN69y3eqmPaVSKVxwIyIisGfPHiEIaJKRkYGCggJRaC3Pw8OjVp8vfX39emsOLC0txZ49e9CxY0dMmDAB9+/fR3h4OI4cOYIJEyZU+zkJDg5Gt27d4OjoiKSkJJw8eRLZ2dkYO3YsJBIJwsLCEBgYiK5du6Jjx47CuZ2UlAR/f3+YmprW+tyQSCTw8PBAZGQkYmJihHBVUFCApKQk2NjYoFWrVlp/Dz2plJQU7N+/H/369YOJiQkuX76MEydOwNzcXDgH1OeZnp4eRo4cCZlMhrNnz2L9+vV48803YW1trfU+jY+Px/bt22FoaIjRo0cLn4m0tLTn7kaQRhmuCgsLERwcDBsbG8yYMUM4yTp06IBNmzbh2LFj8PPz0/gLPyEhAZGRkRgwYACGDx8OiUQCT09P2NnZYe/evcJ8RUVFOHz4MFq1aoXp06cL72FpaYmtW7fi+vXr6NevHyQSifABNjU11dhEZGhoCENDQ+jr60MikcDU1LTSB6NDhw6ii6ZcLselS5fw6NEj2NraIjIyEtevX8e4cePQrVs3AECXLl1QUlKCCxcuwMPDQ7hwVaSnp4cxY8YIy3l4eCAgIADR0dGYMGGCMN3V1RVZWVm4d+8eBg4ciCZNmiAqKgrx8fEYP368aHkzMzMEBQWhc+fO6NSpExISEpCeno7JkycLH8qOHTtiw4YNuHHjBry8vNCsWTMhQBgbG1cbcIKDg2FoaIg333xTCKLu7u4ICAjAiRMn4OjoWO0v0/KMjIy0Ok5A2f
lx7do1DBkyBC+//LJw8bGyssKuXbtw6dIlDBw4EEZGRkIwNDAwqLYPXWpqKgBUeQHS19cXLa/tPq8oPz8fd+/eRc+ePYUvMwCQyWQ4duwYkpOTRb+mW7Zsibfeeks4Di4uLti4cSPOnj0LJyenamvIavN+nTt3xo0bN5CcnCzUKN25cwfGxsZwdHR8ouN9//59XLt2rdLn2cbGBvv27au2/Nq6ffs25HI5xo0bJ/xoa9euHdatW4erV69WG65MTEwwY8YMmJubAwBat26NLVu2ICYmBm3btkVhYSHOnDkDS0tLzJo1SzinOnXqhPXr11dbrjt37kBPTw+vv/46bG1tAQDOzs5Yv349wsPD0alTJ43nXEFBAVQqlc46k6enp6O4uLhe+pEqlUr0798fQ4YMgUQiQZcuXZCbmyuEeU3nTGJiIs6cOYPBgwcLn3EPDw/o6enh8uXL6Nu3L8zNzXHjxg3Y2tpi7Nixwr4yNjbGn3/+iYSEBLi5udXp3LC1tYW5uTmio6PRu3dvodY2PT0dr776KvT19REbG6vV99CTkkqlmDp1qlB2BwcHrFu3Drdv34aHh4fQpUGpVGLmzJnCZ9Pe3h7r169HWFgYxo0bp9U+tbCwwKlTpyCTyTBz5ky0atUKQNl1bMOGDU+8LQ1Noxzn6sGDB0hNTYWHh4copJibm6NTp05ISUlBRkaGxmVjYmKgp6cHd3d30a82KysrUc1DWloaHj58WOk9rKys0Lx5c8TFxel0myp+Edra2kKlUkGpVEKlUuHmzZto0aIFXFxchHmkUinat2+Px48fIzMzs8p16+vro2XLlqLlDAwMYGJiImoWMDY2RsuWLaFUKqFUKqFQKBAZGQlzc3PR+6o/OMbGxrh165awrL6+PpKSklBaWgqgrMlr3rx5ePvtt9GkSROt90VWVhYSEhLg7OwsquEzMjJC9+7dkZmZiZSUFK3XVxtRUVHCr/7y54eLiwssLS1x584dYfuehtrs84pMTEzw7rvvwtfXV1T2Fi1aoLS0FAUFBaL5ra2tRQHXwsIC9vb2SE9PrzTvk7xfu3bt0KJFC0RHRwMo++ESFxcHOzs7NG/e/ImOd2xsLGQyWaXjpcvhMJo1awa5XI6kpCQolUoAZSHp888/x2uvvVbtsjY2NkKwAsr2sbGxMRQKBYCyYPLo0SO4urqKymxqaipaThN1bbA6WAFl/aKaNm2KgoKCKs/TnJyc6jdYS0qlErdv38axY8fQsmVLODs762S9tWFgYAAXFxfh2EulUrRp00b43tTk7t27kEqlonNGIpGgffv2KCoqQnp6OqRSKYyNjfH48WM8evRIWLZ379745ptv4ObmBqBu50azZs2Emp2srCwAZdclmUwmNKs+q++h1q1bi5r2TExM0Lx5c+H8zM3NRWxsbKXPZvPmzWFpaYnU1FQUFRVptU/z8/ORkpICR0dHjdej502jrLlSf3Fo+lXSrl07hIaGIicnR9SerZaamgpDQ8Nqq9sBCLdVHzhwAAcOHKj0+tO+db580CouLkZ2djbS0tKwdOnSSvPW9ReoRCKptm+N+gJpbGxcqT3cxMQE5ubmyMzMRGlpKdq2bYsBAwYgODgYYWFhsLOzQ/fu3dG1a9daf3AKCgpQUlKi8ZewpaUl9PX18ejRI6361dSWum9DxZpFFItrTQAAIABJREFUdfBMSUlBcXFxjbU6dVWbfV7V8mFhYQgLC0N2dnaVF5iqtGzZEjdv3kROTo5WNRHavJ+pqSkcHBwQExODgoICZGdnIz09Hb169YJUKn2i452eng5DQ8OnWmvi7u6OO3fu4ODBgzhy5AicnZ3RvXv3KmuGqlPxM6f+nqnr90lKSgoOHz6M2NhY0TlRXROLpn2l7qNTfiiJin0Is7OzNd6paW5ujsmTJ9f4nfqs6OlVX2eQmpqK/Px8fPfdd1XOI5VKMXToUGzbtg3ff/89zMzM4Orqij59+sDKyko4hnU9N9zd3XH58mXExsbCzMwMsbGxaNOmjVCbo+330NNQfv89fvwYhYWFuHjxIi5evFhpXvV5ps0+ffz4MYqLi9G6dWut+3Q2Zo0yXFVHnbh1pX///hqHfqhuPKOnpWXLlhg8eHClC7tUKhX9EnhW1L/U1GXw8fFBnz59EBUVhatXr2L//v04cuQIXnvtNa368Gijvh6Fqa5FrAv1L8Oqzs3ygaom1ZVBoVDg999/R3R0NF5++WW4u7tDKpXi9u3boiZvXdH2/SQSCTp16oTIyEikpqbiwYMHkMlkWvWxaAiPPjUwMMDkyZPh4+ODa9euITIyEtu3b0fz5s0xdepU2NjY1Eu5YmJiEBAQADs7O6HJpqSkROhbWBVjY2NIJBJRkGrZsiXGjx8PhUKBxMREnDt3TuNy5Ydi0NPTQ5s2bWBhYSG6IDeG4Rgqbkt56u97KysrfPjhh0hMTERERARu3bqFixcvws3NDRMnToShoWGdzw07OztYWVnh1q1baNOmDR48eABvb+8af7Q9yffQk+jcuTPc3d0rTTcxMRHKXNM+ffz48VMvZ0PSKMOVuglK08FKSkqq9pdsmzZtcOfOnRp/mavfo3nz5vD09NRZ2etCX18fxsbGyM/PR8eOHbXua6Sr93306FGlzv55eXnIzs6Gq6sr9PX1UVhYCLlcDhMTE/Tp0wd9+vRBWloatmzZgpCQEDg6OmrdNGhsbIwmTZpobL5IS0tDaWnpUwuTZmZmKC0tRWFhoej8KCwsRGZmJszMzGpdE9eqVSs0bdoU9+7dQ9euXSv9ajt//jyOHj2K6dOno3379lrv84pSUlKE/nLl+2NoE9qAsi/utLQ0rWuCavN+tra2MDExwd27d5GSkiI0Carnr+vxbtWqFe7evYvc3NynUnulUqmE5k1zc3O89NJLeOmll4Rgc/bs2RqbBqvTokULGBkZITU1tdbjvl26dAlGRkaYMGGCsC+LiopqDDdWVlZo1qwZbt68iW7duglNYOrR+mUymcZwpWkoBk3Ux0rTjwm5XC7c6VxfIUxdfnt7e7Rp00bjPAqFAvn5+WjSpAns7e1hb28PpVKJEydOICgoCDExMejUqVOdzw1jY2M4OTnh2rVruH79OlQqFdq3by8qozbfQ9o03z8J9WfTyMgIHh4eVdY4abNPgbIfKuWbWZ9njbLPVZs2bWBpaYmrV6+isLBQmJ6ZmYmbN2/C2tq6yluI1R+SyMhI0a/ihw8fiqpZraysYG5ujr///lsU4lQqFW7duvXUT+ry1Le3Z2Rk4OrVq6JyZ2dnIyYm5qn8wle3oWdmZuLu3bvCdJVKhatXr6KgoACurq4Ayu52XLVqFR4+fCjMZ2FhASsrK6EPl7ZatGiBtm3bIjo6WtR3rrCwEOHh4TA3NxeaUVq2bIni4mLRhbm0tBT5+fl12mY3NzeUlpZWOj/u3r2L1NRUdOjQodZNgq1atUKHDh1w/fp1xMfHi17Lz8/HlStXYGFhgTZt2tRqn1ek7mdSfl+rVCrRMSkvOTlZ1FcvMzMTCQkJQhCqSW3eT93P5OLFi7h//z48P
T2Fi2ttjndFzs7OUCgUuHXrluh4qfuyVEebc0cul2PHjh1Yu3YtcnNzhenq75gnrSm3sLCAhYUFoqKiRO+bm5tbbT9KoCwAVPxc5efn11hDYG5uDldXV9y5cwfXrl3T+XdH69atYWpqimvXrlXaP3fv3kVGRka1nyNdf6Yr6tChA0pKSnD+/HnR/issLMStW7egVCqRnp6O1atXIzAwUNg/enp6aN++PWQyGf4fe/cdHtV1Jn78OzOa0ah3VEACdYGqBRIYjCkCbDC4Aq4xxF4nxuvE6zib7Dre/LzZ3Sd5NvEm2ImxTTAY2zHrggvFmCbLFIOEugQIFVQQqKBeZjQq8/tDO9cMkkASQyTw+3mePMEzV/eee865975z2u3r67vmuhEdHU1XVxeHDh2y6hK0fDec+5C9vT2urq4D3g7Q3t5ukzFZbm5uBAQEcPLkSWXJI4vy8nLleh1Onrq4uBAYGGj1d9C/XqCtynY8uSFbrhwcHFiwYAEffPABb731FrNnz1aWYujs7OTBBx8c8sINCgoiNjaWQ4cO0dbWpkzdzsrKstrOycmJlJQUPvzwQ9566y3mzJmjzJ4rKiri/vvvZ/r06Wi1WpydnSkrKyM9PZ2goKAhuzs8PDyU5Q5CQkJG9D64hIQEcnNzlfVIQkJCaG1t5dChQ3h6erJmzZrr8k676OhoTpw4wSeffML58+eVZQEyMzOJj49XxsEkJyeTn5/P1q1bmTt3Lo6OjhQVFXH69Gluv/12ZVyYs7MzOp2OzMxM5dfa5S0dGo2GBQsWsGnTJjZu3MjcuXOVqflVVVWsXr1aab0LDAzE1dWVHTt20Nrailqt5ttvv6W+vt5qsPZwyykoKIj4+HgOHDigPAQsU6CDgoJISkoacR5axm9UVlayceNGZapya2srR44coaOjg1WrVinpHW6eX87Hxwdvb2/S0tJoa2sjICCA7OzsAQGdRWNjIxs3blSW8Th06BDt7e3Mnj0bjUajrKGTnJw86Fo5Iz2eZZyJq6urVVf7SMr7cpbr+dLyslzPVxvXMZy6o9VqmTVrFh988AGbN29m9uzZqNVqcnJyuHDhwhWXQBkOBwcHbr/9dj788EP++te/Mnv2bDo7Ozl8+DDt7e1X/NuoqCjy8vJ4++23ufXWW6mvr1deZXOl1m2VSsXSpUu5ePEiH374IXl5eURHR2NnZ6d0gen1+lF3d06YMIFbb72Vffv2sX79em699VZ0Oh0lJSXk5eUxefLkK15Hw72mRyskJIT4+HjS09Pp6OggOjoak8nEkSNH6OnpUWayTZs2TVl3MDIyks7OTg4dOoSjoyMBAQHXXDf8/Pzw9vbm3LlzREVFWT2zhnsf0uv1hISEkJaWxqeffkpUVJQyg9YW3YdarZaUlBQ2b97Mli1bmDt3Lq6urso1Nm/ePBYtWjTsPJ03bx6bN29WrnO1Ws3Ro0dpaGj4u72T9e9lXL+4uaysjIqKChITEwc0Rfv6+jJlyhRKSko4fvw4RUVFeHl58YMf/MBq9szl1Go14eHhdHZ2kp2dTUFBARqNhpSUFEpLS4mOjlbGyPj5+TFlyhTOnj1LRkYGJ0+eRKfTsWrVKqKjo1GpVKjVary8vCgqKlJalSwzSS7n4+NDeXk5OTk5tLS0EB8fT0NDA3l5ecTHx1vN2qirq1Oa7d3c3JSZIyaTiZycHLKzs6muriYhIYGVK1cO2fUzVB7m5+fT0tJCcnKy1YD4/Px8urq6uOWWW7Czs1NmYnV1dZGZmUl2djZNTU3Mnz+fO++8Uxl0bRnwWVVVRXp6Onl5eXR2drJo0SIWLFigjMlwdXXFYDCQm5tLcXEx06ZNG/RBcPn+LLNnHnzwQatFLh0dHfH396e4uJisrCylDIODg6mtrVXOb7jlpFariYqKQq/Xk5OTw4kTJzh37hyJiYmsWrXKajxBS0sLWVlZTJ48+aqBsmXmW1dXF7m5uWRnZ1NeXs6kSZN47LHHrGZaDTfPLz++Vqtl6tSp1NfXK/nr7+/PkiVLOHPmDJ6enoSGhtLV1UVGRgYRERFERkaSmppKQUEBzs7OPPTQQ0paLNtNnDhx0PMb7vEs9Ho9J0+eJDAwkBkzZlgFP8Mt77q6OqvrZSTX8+WGW3cuv9cUFhYCcM899yiLdV5eFpa8c3d3t3o59mB56uvri6enJ/n5+eTk5FBXV8eCBQvo7OxEr9cP+XJtSwtJQUEBeXl5tLS0cNddd6FWq6mtrSUxMXHIyS5arVZ5kfGpU6fIysoiPz+fpqYmoqOjeeSRR6wC4KHuF4NRqVQEBwcTHBxMWVkZGRkZFBQUKIH7pUsbXEu5DHVvKysro7q6Wtnu8rRbFlG2fJeZmUlpaSnh4eE8+OCDyhiyy+8DpaWlBAQE8PDDDyutTMOpG0PRarW0tLRQU1PD4sWLrQLHkdyHgoKC6OzsJCcnh/z8fEwmE3fccQfV1dVDXrs9PT1kZ2cDKPf6Sz+/tN65u7szdepUqqurOXHiBPn5+RiNRpYvX64ElMPJU8u+Jk6cyJkzZ8jKyqKyspJZs2Zhb2+PyWSySsuNTmUeDyNGx4HCwkK2bt3KY489Ji+FFoL+RXQ3bdrEHXfcwZw5c8Y6Od8rra2tvP766wQGBl5xhXZx4+rt7eX999/HaDQOuS6juHHdkGOurkVvby9fffWV1XgGyzgqR0dHq35vIb7PKioqsLe3t9lMTzE4yyyzS507d46WlpabbtVq8R3LGMfLuwTFzeHmaH8bAcuCZxkZGUr/saWraPr06RJcCUF/90BDQwOzZs266mKWYvTMZjPV1dUcOXKEM2fOEBISQnNzM2lpaXh6eg45xEDcuJqbmzl16hRHjhzB3t5+yG5fcWP7XnYLGo1G9u7dqwz+dHNzY+7cucyZM+eGWKNFCHHzuPzl15Yxd5e/IFjcHE6ePMnWrVvx8vJi9erVQ74HV9zYvpfBlRBCCCHE9fK9G3MlhBBCCHE9SXAlhBBCCGFDElwJIYQQQtiQBFdCCCGEEDYkwZUQQgghhA1JcCWEEEIIYUMSXAkhhBBC2JAEV0IIIYQQNiTBlRBCCCGEDUlwJYQQQghhQxJcCSGEEELYkARXQgghhBA2JMHVCLz/vomEhFaKinrHOilCCCGEGKckuBJCCCGEsCEJroQQQgghbGhcBle1tbVs3LiRF198kV/+8pf84Q9/4MyZM5jNZmWb/fv38+tf/5qCggLeeecdXnzxRf7lX/6Fd955h9bWVqv9dXd3c+DAAV5++WV+8Ytf8F//9V9kZWVZ7c/Wysv7+NGPOvHyambKlBZeeslAY6P18errzfzud0aiolpRqZpYtKidgwd7sCTLYDDz9NOd/OhHnbz/vom4uFaefrqTxsY+nn66k6ef7uTo0R5Wr+7A1bWZuLhWtm0z0dMzdLqOHOlBpWriiy+6eeklA1OmtODl1cy6dZ3U1PRRVXX1dBsMZt54o4tZs9pQqZqYNauNDz4w0dU18DifftrNj37UiatrM0eO9Cesrw/27+9m0aJ25e+vlm4hhBDiRjHugquqqio2
bNhAQ0MDd911FytXrsTJyYktW7aQm5trtW13dzcff/wx9vb2rFq1iqSkJIqKiti9eze9vf3jonp7e9m+fTtpaWnMmjWLhx9+mJCQED788EOOHDlyXc7h1KleHnqoAzs72LbNiVdfdSQjo5enn+6kqak/UKmt7eOppzo4cqSH//5vB/bvdyEuTsMTT3Rw9Kh1lPH55yZeecXI3XdrmTfPDo1GBcDhwz289JKBBx7Q8sknTtx2mx0/+5lhwN8P5vnnO3F2VrFlixMvvaTnyy+7Wb26g4ce6mDqVDWffOLMc8/Z8957Jt58s4v/y04MBjO/+IWBP/2pix/+UEdqqgsPPqjlX//VwJ/+ZBwQID3/fCft7WZ+9Ss9EyaoMZth2zYTzzxj4K677EhNdeGHP9TxL/9i4C9/6eI6xrtCCCHE34XdWCfgUr29vaSmpqLX63nqqafw8vICICYmhnfeeYd9+/YREhKCq6srAH19fdx2222kpKSgUqmIi4ujra2N8vJyOjo6cHV1JT8/n7y8PB544AESExMBiIuLw2QycezYMRISEnB2drbZORgMZtav7yIuTsPvf++Ak1N/IDRlipqHHurg0KEe7r5by4kTvWi1Kv7wBwfCwvpj3MREDeXlveza1c2sWd8VTXCwhs2bHZk6VaMcA8DZWcVf/vLd59HRGk6d6uXIkR5uv/3KRfvv/+7Ao4/qUKlg3jw79HoVzzzTybvvOimfz51rR329mSNHemhtNePhoeLo0V527Ojm7bedWLiw/xjz5tnh66vmxRcNLFhgR3Lyd8det86eF17QY/d/HxUV9fLf/23k17/WWx1frYYtW0zce6+WyZPHXcwvhBBCDNu4eoo1NTVRWVlJeHi4ElgBODg4MH36dBobG7lw4YLyub29PREREahU/QGMRqMhICCAvr4+zGYzZrOZkydP4uHhQUREhPJ3Go2GsLAwWltbaWxstOk5VFb2cexYD8uXa5XACmDyZDXR0Rpyc/ubgO66S8tHHzkpgRWAXg8TJqhpbDRjMn3XhJOQoGHKlIFFdfnn7u4qwsI0w+peCw5W83/ZhkoFrq4qYmM1zJihUT7XaPoDtp6e/q683l44cKCbpCQ7pk/XKPtSqWD+fDuCg9VK15/FbbfZKYEVwIkTvTg5qZg7187q+ImJdpw928e5c31XT7wQQggxjo2rlqvOzk5MJhNubm4DvvP19UWr1XLx4kUiIyOH3Ida/V2w0dXVRXNzM3V1dfzmN78ZsK1er7dNwi9x8aKZ3Nxe7rmnfdDv4+IclH+Xl/fx2mtd7NrVbbW8w49/bD+qY6tU/QHRaKnVKAHPYEwmM42NZry8VOh01t95eKiIjNRQVWXGZBp6H2VlfRw92sOUKS2jT6gQQggxjo2r4OpKrmXwube3N/Pnz0er1Vp9rtFo8Pb2vtakDer117/rrruUr29/9JKX18vjj3ewcKGWjz92wt9fjdFo5t/+zXhd0nO9mc0o47KuZuZMO371Kz0uLgMjucjIa4gOhRBCiHFgXAVXjo6O6HQ6WloGtmrU1dXR3d09omBIq9Xi6OhIR0cHUVFRylit68nFRUV4uAZfXzXz5w+dvbt2dePvr+bFF/V4e/cHGQYDA1qExhOdToWnp4ri4j5MJnD4rhGO5mYzJSW93H239ornMGFC/7nGxGgIDh5XvdJCCCGETYyrp5uHhwdBQUEUFxfT0NCgfG4wGMjMzMTT0xN/f/9h70+j0RAZGUlDQwM5OTlWrV/Nzc2UlpbafDmGKVPUJCZq+OADk9USBmYzZGT00NbW/1lPT///Lj18S4t5XI850mggJUVLRkYPmZnfNVOZzfD11z2cPdvHnDlXjtdnzrSjpqaPzz4z0XfJqXZ0mMnI6LH6TAghhLgRjauWK41Gw4IFC9i0aRMbN25k7ty56HQ6Tpw4QVVVFatXrx5x61NCQgK5ubns2bOH+vp6QkJCaG1t5dChQ3h6erJmzRp0Oh0ffPABTU1NPPHEE7i4uAy5v87O/kHZFy4MDMpiYjR4e6tYt86eNWs6ePrpTtas0aHXq0hL62bbtm42bHAkJcWO2bPt+P3vjTz3XCePPqqjsrKPzZtNZGT0jHrM1d/D7NkaVqzQ8swznTz/vD2RkRqys3tYv76LdevsSUy8cpWKidGwbp09/+//GWlqMjNvnhaj0cw775gwGMy8+aYjAQHjKuYXQgghRmRcBVcAgYGBrFu3ji+++IJdu3bR29uLj48Pa9euJTw8fMT7c3Bw4Ic//CF79+4lMzOT48eP4+joSGJiIkuWLEGv19Pd3U1fXx8mk0lZH2soxcW9PPZYx6DfHT7sgre3HfPm2bFjhzN//GMXjz/ev+099+j46CMn4uL6xxQtXGjHZ5858/LLBpYvbyclRctvf+vAN990c/RoL+3tYMMVImzGwUHFf/+3A++8Y2LzZhPHj/cwc6Ydv/2tA/ffr7OaGTgYOzt44QU94eEaXn+9i//4DyORkRrWrtXx5JP2+PhcYUS9EEIIcQNQma/nMuVCCCGEEN8z467lajxqbzfj4tI81sn4XnB2VtHW5j7WyRBCCCFGTVquhBBCCCFsSEYOCyGEEELYkARXQgghhBA2JMGVEEIIIYQNSXAlhBBCCGFDElwJIYQQQtiQBFdCCCGEEDYkwZUQQgghhA1JcCWEEEIIYUMSXAkhhBBC2JAEV0IIIYQQNiTBlRBCCCGEDUlwJYQQQghhQxJcCSGEEELYkARXQgghhBA2JMGVEEIIIYQNSXAlhBBCCGFDElwJIYQQQtiQBFdCCCGEEDYkwZUQQgghhA1JcCWEEEIIYUMSXAkhhBBC2JAEV0IIIYQQNiTBlRBCCCGEDUlwJYQQQghhQxJcCSGEEELYkARXQgghhBA2JMGVEEIIIYQNSXAlhBBCCGFDElwJIYQQQtiQBFdCCCGEEDYkwZUQQgghhA3ZjXUCLpeamsrp06cxmUw237dOpyMqKooFCxbYfN9CCCGEEAAqs9lsHutEWKSmppKXl3fdjxMXFycBlhBCCCGui3HVLXj69Omb6jhCCCGE+P4ZV92Cl3YFPvfcczbf//r16wccRwghhBDClsZVy5UQQgghxI1OgqsbVEdHB3/84x959913pSVuEDt37uQ3v/kNtbW1Y52UcclsNtPV1cU4GnIphBA3jXHVLTjelZSU8Oc//3nA51qtlqCgIObMmUNcXBx2dpKtYnxLTU1l165drFmzhri4uLFOjhBC3FQkChiF4OBgIiMjlf9ua2sjPz+frVu3MnPmTB544AF0Ot0YplCI/tbNt956C29vbx588EGrOqnX63F0dMTR0XEMUyiEEDcnCa5GITQ0lDvvvNPqsxUrVvDhhx9y4sQJkpKSCAsLG6PUCXF1s2fPZvbs2WOdDCGEuCnJmCsb0ev1REZG0tvbS0NDw1gnRwghhBBjRFqubMRsNtPa2gqAi4uL1XcGg4Fvv/2WEydOcP78ebRaLdOmTWPJkiVMnDhxwL4aGho4cOAABQUFtLa2MmHCBObMmcPs2bPRarVXTEdNTQ1vv/0
2AGvXriUgIADoX37i0KFDpKWl0draSmBgIAsWLECr1bJp0yaeffZZpbVt586dZGVl8fDDD3P8+HFyc3NZsGABy5YtA/q7QQ8fPsy3335La2srrq6u3Hrrrdx2221W515bW8ubb75JcHDwgG4py/i1RYsWsXz5cqvtExMTueWWW9i7dy8nT54EIDY2lhUrVuDh4WF1vh0dHaSlpXH48GG6uroICQlh8eLFIxqo3dPTQ15eHqmpqVRVVeHo6Eh8fDyLFi3Cy8vLKl+dnJxYu3Ytbm5uAPT19bF7925SU1N58MEHSUpKQqVSYTabOXPmDPv376esrAy1Ws20adNYvHgxEydORKVSKcc3m81UV1dz4MABioqK6OzsVMonISEBtVo9ZJ5ZHD9+nA8++ICHH36YmJgY3nrrLSoqKgCoqKggMzOT6dOnK+Vg2f7Sch8sLyx1dbB0W+rJE088QWlpKUeOHKGuro4JEyawePFipk+frqRdCCG+TyS4soHe3l4KCgpITU0lOjqaKVOmKN+1traydetWKioqiIuLIy4uDqPRSG5uLn/5y1944oknrB5u5eXlbN26FaPRyC233IKLiwvl5eV8+umnnDt3jlWrVg05nqu1tZWPP/4YGBhYffTRR2RkZBAQEEBycjIGg4GPP/6Yvr6+QffV2dnJtm3baG1txdfXVwnqampq2LJlCw0NDcTFxeHj40N9fT0HDx4kNzeXtWvX4ufnd035mZWVRXp6OpMnT2bBggWUlpaSlZVFW1sba9aswdnZ2SpvS0pKCA0NJTQ0lIaGBt5++236+vqU7a7EZDKxfft2MjIyiIyM5M4776StrY3s7GxKS0t54okn8PPzw8/Pj8WLF7Nt2zaOHTvG4sWLUavVlJWVcfToURITE0lISEClUtHX18e+ffvYu3cvU6ZMISUlBZPJRG5uLq+//jpr164lIiJCSUNOTg4fffQR9vb2JCcno9PpKCoqYuvWrdTV1bFkyZIRBSn29vbcc889tLW1sWvXLtzc3FiwYAEuLi5XnGxhyYtjx44RGhrKkiVLlHSvX7+e1atXM2PGDKsAq7Ozk82bN6PRaJg2bRrh4eFkZ2ezbds2AJKSkoadbiGEuFlIcDUK+/fvZ//+/QM+nz9/PkuXLsXe3l75LD8/n+7ubp588kmioqKUz2fMmMHGjRvJzMwkODgYjUZDZ2cne/bswc7OjmeffVYJjvr6+khNTWXHjh1MnTqVxMTEAce2BBqNjY089thjyt8CnDx5kqysLJKSkqyCs8WLF/Pee+9RUlIyYH9Go5GYmBhWrlyJXq8H+h+++/bto7W1laeeekoJEMxmsxIM7N69m8cee+yaBvQbjUarAKSvr4/PP/+ctLQ0zp8/T0REBGazmYyMDEpKSrjzzjutApDz58+zZcsWenp6rnqs7OxscnJyWLNmDbGxsUrgcNttt7Fx40ZSU1NZtWoVdnZ2xMfHU1JSwtGjR4mOjsbb25tvvvkGZ2dnUlJSlHMuKSkhNTWVe++9lzlz5ijpWrBgAVu2bGHfvn1MmjQJR0dHGhoa+OqrrwgICOCxxx7D3d1dKZtPP/2Ur7/+mqioKKuA/Wrs7OwICQmho6MDBwcHXF1dCQsLu2qZZGdnc+zYsQH5OX/+fN577z127tzJxIkTreqW0WgkNjbWql7deuutbNy4kVOnThEfHy+TO4QQ3zvSZj8KwcHB3Hnnncr/Fi1aRGBgIIcOHeLrr7+2ag2aM2cOzz//vFVgBeDh4YG7uzsmk4ne3l6gPygoLi4mKSkJf39/ZVu1Ws306dOZNGkS5eXlA1qbTCYTO3bs4Pz58zz00EOEhIQo3/X29lJUVISjoyPz5s2zetC5u7sP2bLg4eHBokWLlMAK4MKFCxQWFpKcnGzV2qZSqYiIiCA5OZkzZ85w4cKFkWTnALNmzSI8PNzq/C2zM5uamoC7fe7tAAAgAElEQVT+h/qZM2cICgri1ltvtWrZCQgIGNbyAgaDgaysLMLDw4mMjLRqkfH19SUmJobq6mra29uB/hd/L1y4EHt7ew4ePMi3335LYWEhixYtwtfXF+jP7+zsbLy9vYmNjbVKl5ubGwkJCdTU1NDY2Aj0B2I1NTXMnj1bCaygf3kPSzewpXvverLkxWD56e7uTkpKCu3t7RQWFlr9nYeHh1VgCeDj40NQUBANDQ10d3df97QLIcR4Iy1XozDYbMFly5axd+9e9u7di6+vLwkJCcp3HR0dHD9+XBlzdSlvb2/l3w0NDfT29g4Y2wL9D7if//znA9LS3d3N/v37ycjIYO3atVZBCfQHIefPn8fT03PAeKUrUalUA9JQU1OD0WgkODh4QDeVWq0mIiKCr7/+mpqaGiZPnjzsY11OrVYPOPblrR+tra3U19cTHByMg4PDqI5j2UdjYyO//OUvB93G09OTrq4u5b99fX2V7kFLa6ClOxC+y+9z587x8ssvD3lso9EIQH19PS4uLkyYMGHANpMmTeI//uM/RnVuI2XJi8jIyEG7U/39/fH396empgaTyaSUx2D1RKVSSWuVEOJ7TYIrG1Gr1SQkJHD8+HEKCwuJjo5Gq9VSX1/Ppk2b6OzsZPbs2dx9992o1WoMBgO7du0adF8jeTDl5eUpY4IOHz5MSEgIrq6uA7bz8vK66mD44XJychr087F4oHp4eFzzcS9ft+xSdnZ2Vq13ANOmTSM4OJjKykpmzpw56PH9/PyIiYkZcozTpWWk1WptVjbXysnJCY1GM+BzrVY76OdCCCEGkuDKhhwdHXF2dqavrw+z2YzZbOb48eM0Nzfz9NNPW42bsYyHGcxQg8wHM2HCBNauXUt7eztvv/0233zzDcuWLVNallQqFWq1WumisUUA1NHRMaLPrwe1Wo1araalpYXu7u5rCk4sg9WHEzyYzWaysrKUcWr79+8nICBgwGKcbm5upKSkDKtVzVJXxoOOjg56e3sH5IXRaLRqwRNCCDE0GXNlQ62trTQ3NyvdWt3d3TQ1NeHr64uPj4/VtoM9UL28vNBoNFRUVAz4rqOjgy1btvDFF19YjWMJDAzE09OT0NBQli5dSmpqKidOnFD+3t7eHj8/P5qbm2lpabmm8/Pz80Ov13P27NkBAWBfXx9nz55Fr9crswUtAdD14OzsjJeXF7W1tXR2do5qH05OTri5uVFaWkpzc/Ow/qa6upr9+/czc+ZMHnnkEYqLi/n222+V/NZqtXh7e1NRUTGs9xr6+PjQ1NQ0oLsY+rsMN2zYQFpaGtCfn9er9cjV1RUfHx+rMWaXqquro76+Hj8/P+nyE0KIq5DgykZMJhNpaWm0tbURFhamdKPodDpqa2upqqpStu3p6eGbb76hsrLSah8BAQGEh4eTkZFh9WA2m83k5+eTn5/PpEmTBm2lUalUzJw5k/j4eHbt2kV1dTUAGo2GyMhI2tvbSU1NtXrJc3NzMxkZGcM+R39/fyIiIkhPTx8ww7CkpIT09HQiIiKUwfiWAKiqqoq2tjZl2+7ubgoKCoZ93MHo9XoiIiKorKzk22+/tQr2zp8/T15e3l
X34eTkRExMDHV1dRw8eNAqbyyzEbOyspTAyWg0snfvXjQaDfPnzychIYEZM2Zw4MABZdC5TqcjNjaW7u5u9u7daxWoWGZVpqWlKTMZw8LC8PPz4+jRo8o6adBfRzIyMqioqFDGr7m5ueHj40NFRYVVQNnR0TFgoPmlTCbTVVvGHBwciIuLGzQ/W1tbOXDgAM7OzkRHR181X4UQ4vtOugVHobS0lD179ij/bTQaOXnyJHV1dSQlJXHLLbcA/YHN9OnTyc3NZdOmTURFReHq6kp+fj5GoxGNRsPFixeV7jpHR0eWLFnCe++9x6uvvqqsc1VZWUlRURFJSUnExMQMmS6dTseSJUt4++23+eyzz3j88cdxdXVl2rRpJCYmkpGRQXV1NdOmTcNgMJCdnT2iLkidTseyZcvYsmULGzdutFrnKi8vDy8vL5YtW6a0bOj1emJiYvjkk0/485//THx8PNC/NERzc/M1tcKoVCqSkpI4deoUe/bsobi4WFnnqrCwcFjrXKlUKubOnUtNTQ1Hjhzh7NmzTJs2DbVaTWlpKeXl5SxZskQJTDIyMsjLy+OBBx7Az88PlUrF/PnzKS4uZs+ePTz++OM4OjoSFxfH4sWL2bNnD6+88grx8fHo9XqrcrTku6enJ0uWLOHjjz/mj3/8o7J0waXHDwoKAvrHl0VGRpKWlsaf//xnpRzz8/OtAkMLSytaTk4O//u//8vEiROZM2fOgDFkFklJSVRVVVnlp2Wdq/b2dlavXm01i1UIIcTgNC9faUrT39nx48eVf8+aNWvc7b+xsZH09HSam5spKSlR/ldVVYWPjw933303KSkpVi1LHh4eRERE0NDQQFFREfX19cTGxvLII49gMBioqakhLi5OCQQ8PDyIiYnBYDBQUFBAYWEher2eu+++W1lRHfpbfzIzM3F0dCQmJkYJVJydndHr9Rw+fJienh7Cw8PRarVMnToVvV5PcXExp06dAuCuu+4iJCSEkydPkpycjKenJ4CynMKMGTMGBCjOzs7Ex8ej0WjIzc2lsLCQjo4Obr/9dlauXKmsaA79wcvEiROVbrLCwkIaGxtJSkrijjvu4MyZM0prGPS3wGRmZlp9dnnex8bGMmnSJKC/yzM6Oho7OztOnjxJcXExjo6OSjrOnTs36DlcyrL4pZ+fHxUVFeTk5FBZWYm3tzcrV64kOTkZtVpNdXU1H3/8McHBwSxdulQJIJ2dndFoNBw6dAitVktISAhqtZrQ0FBCQkKoqakhNzeXsrIynJycWL58uVU5qlQq/Pz8iIqKoqmpiby8PIqLi/H09GTVqlXK8S3bhoaGWpVjV1cX8+bNY86cOWRmZlrlj0ajwd/fX1lCQ61WExcXh1arpbq6moKCAqty12g0REVF4efnR2lpKdnZ2VRXVxMWFsYjjzxCVFSU1czAoepJb28vhYWFdHZ2Mn36dOlGFEJ876jM42UkLbB+/Xrl388999wNt/8bjdlsZteuXRw+fJh169Zd0/IJQgghhOgnY66+B0wmE1999dWAQdu1tbXk5eURGBg4YMC9EEIIIUZHxlx9D1y8eJHs7GzS0tKUcVyW9+cBrFy5csBSAkIIIYQYnXHVLbhhw4ZBB+bamk6nY926ddf9OOPJ5avEOzo6Eh8fz7x58675RctCCCGE+M64Cq5SU1OHNYX+WsXFxbFgwYLrfhwhhBBCfP+Mq25BS8Bz+vTp69KCpdPpiIqKksBKCCGEENfNuGq5EkIIIYS40clsQSGEEEIIG5LgSgghhBDChiS4EkIIIYSwIQmuhBBCCCFsSIIrIYQQQggbGldLMRQXF491EoQQQghxgwkPDx/rJFgZV8EVQGBg4FgnQQghhBA3iKqqqrFOwgDSLSiEEEIIYUMSXAkhhBBC2JAEV0IIIYQQNiTBlRBCCCGEDUlwJYQQQghhQxJcCSGEEELYkARXQgghhBA2JMGVEEIIIYQNSXAlhBBCCGFDElwJIYQQQtiQBFdCCCGEEDYkwZUQQgghhA1JcCWEEEIIYUMSXAkhhBBC2JAEV0IIIYQQNiTBlRBCCCGEDUlwJYQQQghhQxJcCSGEEELYkARXQgghhBA2JMGVEEIIIYQNSXAlhBBCCGFDElwJIYQQQtiQBFfie2n37t0888wzZGdnj3VSANi0aRMvvPAC5eXlY52UAZqbm3nppZd45ZVXMBqNY52cm57ZbKajo4O+vr6xTsr30oULF/j973/Ps88+y7PPPkt+fv5YJ0ncgG744Kq7u5vXXnuN5557jjNnzgz4vq6ujn/913/lN7/5DW1tbQO+P3r0KM888wyffPLJ3yO5QghxRYcOHeIXv/gFH3300Vgn5Xvn4sWLvPHGG5SXl+Pv709ycjJubm5jnSxxA7Ib6wRcK61WS3BwMKdOnaK6upqIiAir76urq2ltbaWrq4v6+npcXFysvj9z5gwqlYqQkJDrntbm5mb+8Ic/4OHhwT/+4z+i1+uv+zFvNkajkb/85S80NTXx85//HHd397FO0rggdevmodfrUavV8lAfA/n5+dTX17No0SLuu+8+VCqV8t3u3bvZuXMnTz31FLfccssYplLcCG744AogODgYjUbD2bNnmT9/vtUFcerUKcxmM0ajkYqKCqsgymAwUF9fj5OTE76+vmORdCGEsJKcnExycvJYJ+N7qaKiAuh/plz6HBFipG74bkEAf39/XFxcqK2tpbOzU/m8o6ODiooKfHx8cHNz4/Tp0/T29irfNzU1UV9fj6+vL56enmORdCGEEOPEpc8HIa7FTdFy5eLigr+/P1VVVbS0tODk5ARAfX09Fy9eJDExka6uLsrKymhtbcXDwwPo71/v6OjA39/fqhulrq6OnTt3UlhYiMFgwM7OjvDwcFauXIm/v7+yXXl5Oa+99hrTpk1j7ty5fPrpp1RVVQEwefJkHnvsMfz9/ZUum8bGRgAaGxv52c9+Rmho6KBdOCaTiQ0bNlBSUsKPfvQjYmNjle/MZjObN28mMzOTf/iHf1Cap00mE9988w0HDx6kubkZtVpNYGAg9957LxEREcqvMEuaAwICBhw7OzubjRs3Mn36dJ588skB5xgcHMyXX36J2Wzmpz/9KUFBQQPK4tLuqQceeIAdO3ZQXFxMb28vvr6+rF69mqioKKu/6evr48SJE+zevZv6+noAfHx8WLZsGTNmzECtVivdgaWlpcrfvfjii3h6elp1D16+L5VKNWg+WHR1dfHxxx9z/PhxOjo6cHJy4rbbbmPp0qXodLoB5TKcPB7t9pczGAxs2LCB0tJSli1bxtKlS1GrrX8PDbduVVdXs337doqLi+np6cHJyYnk5GSWL1+Og4PDFdMxlLKyMjZs2ADAj3/8Y8LCwoCRlcFwt7XUw4ULF+Lr68vOnTupr69Ho9EQHR3NqlWrrvoD6Up1uaGhYUDdt7B0By1fvpxly5YB/RMQTp48yVNPPUVBQYFV/UlJSWHRokXY2dlZldFwrwnLdXjp8SxpeOKJJ2hublbqlE6nY9asWdx7770D7iONjY189NFHFBYW0tvbi4+PD
nTjFs2DC6du3qvO9d7wtUWVkZM2bMYMKECQQHBzN+/HgGDBjAzp07GT58ODt37rzuuYCK29DLL79MXl6eM11VP7dVqXt122N2djbPPPMM33//PY888ghDhw4lMzOT8ePHs3r1apf2kJKSwrBhw5g/fz4dO3Zk/PjxdOzYkfnz5zNs2LByeVc3/bVycnKYMmUKALNnz670GSh+pfyBGAwG578/Wv7FxcXKhAkTlC5duigpKSkVprNYLMrrr7/uki4/P1/p16+f0qFDByUxMdGZ1mq1KrNnz1YMBoPSpUsX5ciRIy75TJ06VYmMjFR27drlfP3zzz9XDAaDy2uOsvXr10/Jz8+/bl2+/vprJTIyUnnnnXeU0tJS5+vp6enKfffdpwwdOlS5ePGioiiKsn37diU2NlZ58cUXFaPR6ExbVFSk/P3vf1ciIyOVn3/+uVze16Y/e/as8thjj5Ur++eff67ExsYq3377rWKz2ZyvJycnK126dFEmTpzoUsZrnTp1SunVq5fy2GOPKWfPnnW+bjKZlMmTJyuxsbFKUlKSoiiKkpOTozz66KPKmDFjXM6TxWJRZs6c6ZLWcc0mTJigFBcXV/haRRzX6d1331UsFku589ChQwfl2LFjiqIoSkpKitKlSxe3+e7atUsxGAzK22+/7Tb/q8/l1a9PmzZNMZlM5Y57dR0ra9OOul7bphzl+fzzzyutvyNtZGSkS1pHXWNjY5Xhw4crBQUFiqJc+dw88MADyvnz513Sdu/eXdm3b58zj6s/G3v27HGpe1XbkuM8PfbYY0p2drbLeerTp4/St29fJTc3t8K6OcqbkJCgHDhwwOX1mTNnunwuzp49q4waNUp55ZVXlMLCwnL1iI2NddbPaDQqY8aMcfkMKoq9nXfv3l2ZPn26sz25uxbVqZfNZlPmzp3rtp0eO3ZM6d69+3Xvd4pS8efC8Xrr1q2V77//3nlNbDabsmTJEsVgMCirV692pndXn8OHDysJCQnKa6+95lI+xzUdM2aMy33mWo421Lp1a2X79u3lzr3BYHB5vTqf28rq7sirqu3x7bffVgwGQ7l78tmzZ5UBAwYo3bt3V06dOqUoypXP7bV1stlsyubNm5XWrVu7nJfqpr+2TtnZ2cpjjz2mdOnSxeX5JSonPVceptFo0Ol0bv/Wvn17YmNjnf9Xq9V06tQJvV5Pz549adasmUs+CQkJWCyWKg+PVYXJZOL7778nPDycAQMGuHzDNBgMDBo0iOTkZLKysjCbzaxfvx5/f39GjRqFn5+fM62/vz9PP/00ISEhfPfdd5jNZsxmM9u2bSMkJKRc+rp169K/f3+XshQWFrJu3To6depEly5dXHormjRpQs+ePTly5EilQ5y7d+/mxIkTDBkyhLp16zpf9/X1ZciQIfj6+nLgwAEA9u7dS2JiIoMGDSI4ONiZVqPRcP/997uk/S0c9YqLi2Pw4MFoNBqX8zBmzBjy8vL48ccff/OxKnLPPffg6+vrctynn37abY/RzRIZGUnLli3Zt2+fc0h43759BAUFMXHiRI4dO0ZmZiYARUVFHD58mJYtW1KzZk2XfHr27EmrVq2c/9doNDRv3hyLxUJGRgZw421p3Lhx1K5d2/n/2rVrk5CQQH5+fqXtzmazUVpaWu51jUbDiBEjWLx4sfMXIerWrcuCBQuYMWMGAQEB5ephNBqdefn5+dG5c2cOHDjg8isVR44cITMzky5duri0p4pUpV5FRUXs2LGDFi1alGunzZs3p1evXtc9TlV07dqVzp07O6+JSqVy9nxcr8fEbDa7Hcps0qSJs0fdy+v667IGDRpEx44dnf/XaDR06dIFwNmz6snP7Y20x7p16/Lggw+63JPr1q3LoEGDyMjIICkpCYDk5GQ2bdrEgAEDaN++vTOto+dzwIABbNu2jRMnTtxQ+qsVFxczc+ZMjh8/zttvv03btm2vW3dhJ6sFPcxqtVZrSa6/vz81atRAp9OVu2le+5DxhJKSEk6fPk3Tpk0JCQkp9/cnnniCJ554ArDPKzh+/DiRkZHUq1evXNrw8HBatmzJ8ePHuXz5MgDHjx+nbt26VVopmZOTQ0ZGBllZWcTFxblNU79+fYqKiirMLyMjg5CQEOeD7GoxMTHs2bPH+X/HSqSRI0dWWKYLFy5ct9zX46hX165d3V7D5s2bExUVxYkTJ37X5dvh4eHUr1+f9PR0t4GBpwUFBREXF8eePXvIz88nODiYvXv30rJlS+666y4+++wzEhMTiYmJISsri5MnTzJo0CC0Wq1LPhqNxuXh5M6NtqVrP3OVfTm6mlardQ7xjBgxgoceeoi77rqL+Ph4ateu7RLYgD0YS0pKYunSpWzbto2CgoIK8+7QoQM1atRg586dxMfHY7FY2Lp1Ky1btqywbteqSr0c56xNmzYEBgZWKd8boVarr3v9KhIVFUWvXr1Yvnw5ycnJ3HfffXTp0oWIiAiXgPt63LWhq7/8QfU/t5W1kxtpjxWdp8jISPR6vTMQTUlJwWg0cscdd7i9zp06dWLhwoWkpKTQunXraqd3KC0tZe7cuaxevZpZs2a5BKfi+iS4qiKdTke9evXYsGFDhXOgwD5H4OLFi+h0Onx8fH7HElaPTqdDra5ax2XNmjXLPfDAfjNwd4Np2LChS6/J9bRp08Y51+RaPj4+Lt/23fH19a3yudbr9fTr16/CwDU6OrpK+VRFjRo13J43b29vl2+nv5egoCCCg4OxWCy/yy7YGo2Gdu3a8cknn5CZmUlZWRn79u3j73//O/Xr16dt27YkJiby6KOPcvToUXx9fYmPj/9Nx/ytbak6OnbsyGeffcaSJUtYs2YNS5cuBaBTp05MnDiRuLg4VCoVVquVefPm8d///peOHTsybdo0Z8/pli1bWLBggUu+DRs2pFOnTuzcuZOhQ4dSVFTE3r17eeihh1x6XD0lLCysWp/X35Ofnx9Tp06lQ4cOLF26lGnTpgH2tvzUU0/x+OOPlwuSfitPfm490R4dX8CvVdE9rKLzUd303333HU2bNqVp06YsW7aMhISEcl8aRMUkuKqGyMhILBYLx48fJyEhwW2awsJCUlNTMRgMN6XnyVNsNluVJ1Tn5+djNpvLBVJms9ll0r5KpUKj0ZCVlUVpaWmVegDAfl7Hjh3r9oZWFdWpC0D//v2rNCn4t7p06RJms7lcvS5fvuzs6fs9FRQUcPHiRby8vFCr1b9LgBUZGUlYWBiHDh3iwoULWK1WYmJinIHXjBkzyMzM5OjRo7Ro0cJlaPdGj/db2lJ1qFQqoqKimDFjBtOmTSMnJ4fNmzcze/ZsRowYwaJFi4iPjycjI4NPP/2Ufv36MW3aNJcH9NmzZ8vl6+3tzd13383EiRNJT08nOzubc+fO0bFjxxvuAXJHo9Hg5eVFdnY2JSUlVf68/t78/PwYMGAAAwYMwGg0kpyczPLly/nPf/7D2bNnmTJlike/rHjyc+uJ9njp0iWys7PLvX7tgilPv24wGJg9ezb5+fmMGTOGjz/+mBdeeKFKw9JCVgtWS5s2bTAYDK
xZs8ZtY1cUhc2bN3P06FG6du1a6Yq9W8XX15eGDRty5MgRt0twHdszHDx4EH9/f5o3b05KSorbh8DZs2dJSUmhefPm+Pv7o9friYyM5MyZM5w/f/66ZalZsyZhYWHs3r2bc+fO3VB9DAYD586dc7s0OCMjg7/97W8sWrTImdZoNLJ9+/ab+tuMtWvXxmAwcOTIEbc3rvT0dE6ePElUVJRzOLgq80Z+q8zMTLKysoiIiMDHxwe1Wn3Tj1u/fn3atGlDUlISP/74I3FxcYSGhgIQGxuLTqfjhx9+4NChQ7Rq1eqGPzOeaEvVkZ2dzdy5c/n2229RFAWNRkPdunV57LHHmDp1KgUFBfz0008A5OXlkZWVRUJCQrkgoKIViXfccQeNGjViy5Yt7N69m06dOrnMyfSEkJAQwsPDycjIoLCw0KN5e8p3333HnDlznPcTvV7PHXfcwbRp07jnnnvYvn272+0UbkR1P7eVuZH2WNGXxOPHj2OxWJwrgh3DhL/88ku59mO1Wvnll1+c9+IbSe8QFxdHgwYNaNeuHc8//zwLFiy44VXOf0USXFVDeHg4AwcOJCkpif/7v/9z+RajKArbt2/nnXfeoU2bNtxzzz2/e/nKysquu1eTTqfj3nvvJTMzk9WrV7t82M6dO8f//vc/ZwCm1Wrp1asXeXl5fPTRRy579xQXF/PRRx+Rl5dHr1690Gq1aLVaunTp4jb9uXPnWL16tUtZgoOD6dmzJxkZGSxYsMAlvc1mY/Xq1axdu7bSHpb27dsTFRXF//73P3JyclzOxZdffsm+ffucvVSOtMuXL3eZi+Uo39tvv+02aK6uwMBA7r33Xg4dOsSKFSvKLX2fN28eISEhdO/eHbjykHP07jiUlJRcdzPaa/dTcti+fbtLW8jJyWH+/PkEBQU55074+PgQERFBVlYWx44dc6ZVFIXExMRKJxtX9Ru8TqejVatWbNiwgS+++ILOnTs7H0x16tQhISGBpUuXkpaWRps2baqUpzueaEvV4ePjw/bt2/n3v/9dbh8oi8XiLJMjrV6vZ9u2bS7lOn78eLkhQYdatWrRsWNHVqxYwRdffHFTvqwFBATQqVMnt+30+PHjfPfdd9XKz2Qyebw3tKysjP/85z98+umnLuWzWq1YrVb0er3HhgWr+7m92rV1v5H2eO7cuXL77Z0+fZrVq1djMBicn4+oqCi6dOnCqlWryv381e7du1m1ahVdunQhKirqhtJfS6VSMXDgQHr37s0777zDkSNHrnsuhQwLVotKpeJvf/sbOTk5LFmyhO+//54OHTpQv359duzYwfHjxzEYDPzzn/+8KT99UxFfX1/q16/P2rVrmThxIu3atePhhx+mfv36btPffffdDBs2jPfff5+dO3fSrl07TCYT33zzDRaLhXfffdc5xt+lSxeee+45Zs2axZEjR+jWrRsAP/30E2lpaTz33HPOVTeOvPv27cvq1aud6YuKili/fr3zoePgOJ+pqal88skn7N27l27duqHRaEhMTGTfvn0899xzlX5TCg8PZ+zYsUyZMoV+/frRu3dvdDqdy/sdE18bNmzIlClTGDt2LCNGjKBXr140btyYvLw81q9fT1hYGP379/fItevfvz+HDx92e47z8/N58803nT0RAQEB9OjRgzfeeIPBgwfTu3dv5zk+f/68296lhg0b4uXlxdSpU9mzZw+dO3d2mduxePFitm3bRufOnbFarXzzzTecOnWK8ePH07JlS2e6O++8k2XLljFx4kQ2bdpEeHg4v/zyC/v373dbr5CQEOrXr8+8efPIzMykdevW9O3bt9JzER8fj16vx9/f32Vyr1arpWvXrnz66afccccd5b45V4cn2lJ11KhRg2effZaxY8cyaNAg+vTpQ0hICCdPnuS7776jRYsW9OzZE4BmzZrRu3dvVq1axeHDh+natSspKSns2bPHOVfw9OnTLiu5NBoNPXv25OOPPyYkJKTCaQi/hUqlon///mzevNmlnWZmZrJp0yZsNluVpjY4voxt2LCBV155hRYtWjB06FCPlLFHjx7079/fpXxWq9V5/3nppZc8Og+oOp9bqLjuAQEB1W6Per2eNWvWsHHjRpf7ptFoZMaMGYSHhwP2YdIXXniBcePGMXLkSOd9zNH2GjVqxAsvvOAMOqub3h0/Pz+ee+45xowZw5tvvsl///tfmX91HRJcVZOfnx+vvvoqvXv3ZvHixfz000+UlpZiMBiYNGkSAwcOvCmTTiujUql48sknOXPmDN988w05OTn06dOnwvR+fn5MmjSJ+Ph4Pv74Yz788EP0ej29e/dmzJgxNGzY0JlWo9Hw3HPPER8fz7x585y/WN+2bVsmT55M586dXSbG+/n5MX36dFq0aMHChQuZN28esbGxTJ06FYAJEyaUK8uMGTO46667WLBgAfPmzcPHx4d27dqxcOHCcvm7q3ufPn0wGAx8+OGHfPHFFxiNRtq2bev2/Z06dWLNmjXMmzePb775hrVr1xIeHs7f/vY3hg8f7rEfjXVMxO3cuTMLFixg9uzZ+Pj40K1bN5599lliYmJclqYPHjwYvV7P3LlzWbRoEaGhoTz22GN07tyZF198sVz+CQkJTJo0iQ8++IDFixeXm4j//vvvk5yczIoVK8jMzMRgMDBz5kz69u3rMmciJiaGDz/8kHfeeYcNGzag0Wjo1q0bH3/8McuXLy+3DUiTJk2YNGkSM2bMYPny5W5XnF6rcePGziE/xwPi6uMbDAbi4uIICgqq8vmluA9gAAARQklEQVR157e2perq1KkTn3/+OYsXL2b9+vUUFBQQHh7O008/7dKWdDodr7/+OhERESxevJilS5cSGxvLO++8Q61atRgxYgQnT55EURSXOVXNmjWjXbt21KpVi0aNGnms3FerXbs2c+bMYfbs2XzxxRfs27ePtm3bMmfOHLZt28aGDRuum4dOp2P8+PHk5uY6z8Njjz3mkfI57ieOCe2Oz1G7du2c9x9PzkOrzucWKq97ddtjREQEH3zwARs2bGDx4sVcuHCB2NhYXnjhhXIb60ZGRrJ8+XKWLVvGp59+ytq1awkNDS3X9m40vTuO+WOTJk1i7ty5vPLKK7dkYc6fhUr5Aw2gXr2c/uo9Xv4s+Qtxq61cuZKXX36ZFStWuPSECFEdJpOJV155haNHj7Jw4cJyQbEQonIy50oIIf6isrOzef/998vNodu3bx+bN28mPj7+d53iIMTtQoYFhRDiLyo5OZlPPvmEzz77jN69e+Pv7++ci1OvXj1GjRolQz9C3IA/VHCl1+sxGo0Abnfc9uRxhBDir+7OO+/kiy++YMWKFaxfv57MzExCQ0N58sknGT58OHXq1LnVRRTiT+kPNedqypQpLF++/KYfZ9iwYc4J1kIIIYQQnvSH6rlyBDxffvmlswfLkxw/fSKBlRBCCCFulj9Uz5UQQgghxJ+drBYUQgghhPAgCa6EEEIIITxIgishhBBCCA+S4EoIIYQQwoMkuBJCCCGE8CAJroQQQgghPEiCKyGEEEIID5LgSgghhBDCg
yS4EkIIIYTwIAmuhBBCCCE8SIIrIYQQQggPkuBKCCGEEMKDJLgSQgghhPAgCa6EEEIIITxIgishhBBCCA+S4EoIIYQQwoMkuBJCCCGE8CAJroQQQgghPEiCKyGEEEIID5LgSgghhBDCgyS4EkIIIYTwIAmuhBBCCCE8SIIrIYQQQggPkuBKCCGEEMKDJLgSQgghhPAgCa6EEEIIITxIgishhBBCCA+S4EoIIYQQwoMkuBJCCCGE8CAJroQQQgghPEiCKyGEEEIID5LgSgghhBDCgyS4EkIIIYTwIAmuhBBCCCE8yOtWF0AIIYQQ5eWHR97qIvwl1MxM8Xie0nMlhBBCCOFBElwJIYQQQniQBFdCCCGEEB4kwZUQQgghhAfJhHYhhBDiT+JmTL7+K/m9FglIz5UQQgghhAdJcCWEEEII4UESXAkhhBBCeJAEV0IIIYQQHiTBlRBCCCGEB0lwJYQQQgjhQRJcCSGEEEJ4kARXQgghhBAeJMGVEEIIIYQHSXAlhBBCCOFBElwJIYQQQniQBFdCCCGEEB4kwZUQQgghhAdJcCWEEEII4UESXAkhhBBCeJAEV0IIIYQQHiTBlRBCiNvWypUriYiIoH///ly8ePFWF0f8RXjd6gIIIYS4NcrKyti6dStLly4lMTGR0tJSwsPDefjhhxk+fDi1atW61UUUN1nyeQvvbzLy4/FSCk0KahXUq6FhRCcdf+vkh7+P6lYX8U9Jeq6EEOIvKC8vj3HjxvHUU0+xdetWSktLAcjMzOSDDz7ggQceYMeOHbe4lOJmWrO/hAfez+erfSUUmhQAbAqcuWjlza8v0+f9fE7lWW9xKf+cJLgSQoi/mLKyMt599102btxIUFAQ06ZNY9euXSQlJbFmzRo6derEhQsXmDp1KqdPn77VxRU3wdGzFqZ8VYSpTKF5XS8+HxNM2lu1Ofh6KNP7BRDoqyIl28Lk1YWYzMqtLu6fjgRXQgjxF3P48GHWrFmDl5cXb775JkOHDqV27doEBwcTFxfHjBkzMBgMnDhxgi1bttzq4oqb4OAZM7mXbfhqVUx7KIDOTb3x1aoI8VfzRGc/hnf0A2DfaQsp2ZZbXNo/HwmuhBDiL0RRFDZt2oTRaKRdu3Z06tSpXJqwsDDi4+MB2L9/v3PI0GazsXXrVkaMGEF0dDQRERHcf//9LFq0iOLiYuf7TSYTzz//PBEREbz99tscPHiQESNGEBUVRXx8PK+99hrZ2dnljltcXMyiRYvo0aMHERERxMfHM3nyZLe9Z2VlZaxbt46+ffs6044bN47Dhw+jKNLTUh1mW/nXHu+sY9bQIN56JICwQI3z9TMXrUxYUUD0aznUfykbw6Qc+ryfz7aUMhynfePRUhq/nE3jl7PZeLTUJd8fj7v/myPfppPt+Ua/lsOEFQWcufjnHJaU4EoIIf5CTCYTmZmZAERGRhIQEFAujY+PD2+88QZJSUlMmTIFb29vrFYr77//PiNGjGDr1q34+voSHBzM8ePHmT59OlOmTHEJsBzWr1/vfI/FYqGgoIBPPvmE119/3SV9Tk4OI0eOZPr06WRkZABQUFDAp59+ykMPPeQy/6u4uJjJkyczYcIEDh8+7Ez79ddfM3z4cHbu3OnRc3Y7atlASy1/NSVmhZc+L+THY6VYrwqy6tXQ0C/el4da+xIWaA8V9p40c9+7+axKss/RCvBVYbEq7DttZvjCS6zZXwJAfEMtjUK8MFvhh2uCq60pZZit0CjEi/iG2nL5msrsEVqhSWFVUgmPfnjxTznvS4IrIYT4CyktLeXMmTMA6HQ6NBqN23R6vZ7g4GACAwNRqVQcOnSIhQsXotfrmTNnDklJSezdu5clS5YQFBTE2rVr2bNnT7l8Ll++zCuvvMLRo0c5evQor776KgDbtm3jxIkTAFitVhYsWMDu3bsxGAysWrWKtLQ0EhMTefDBBykoKGDBggUUFRWhKApLly5l9erVtGnThh9++IG0tDQOHDjAU089RUFBAXPmzOHSpUs36QzeHqLrevFsNz3eXirOXrIyfOElWr1+gb9/Vsiek2aXQAugxKzwzveXuVhso1W4lt3/rMXx6bU5+EYo7Q1ayiwK87cUU2hSqOWvpnu0NwD7M81cKrZndqnYxo7UMgC6R3tTy19NTpGNl1YWcrHYxgMtfTn0Rihn/h3Ghgk1aVhTw8lcK7N+NPJn64yU4EoIIcR1xcTEsGXLFrZu3co999yDWq1GpVIRFxeHwWDAYrGQk5NT7n0PP/wwjzzyCL6+vvj6+tKtWzcaNWqE0Wh09lydPXuWzZs3A/D8889zxx13oFKpqFWrFk8//TQhISGkpqaSnZ1NTk4O33zzDQDPPvssERERqFQqAgICePLJJ2nRogWJiYmkpqb+fifnT0ilgtF3+bFhQk1ah2tRq+BisY3P95h4eFY+Xd/O5efkK0N9Pl4qZg0JIum1UFY8XYMGwfagPNhPTdcoHwByi2wU/9rzdF+sD37eKtIvWEn+dc5WRq6V03lW/LxV3Bdrf8/m5FLScizUDlTzyv3+1NSrUamgVbiWCT31AGxLKeNCkZuxyz8w2edKCCHEdWm1WvR6Pd9//z0rV6507ot1Pdf2jtWoUYPg4GBOnTpFVlYWANnZ2Zw8eZK6devSvHlzl/fHxMS49Ijt37+f9PR0AEaOHFnhcTMzM0lISKhWHf+Kout68fWEmmTmW/kiqYTP95o4lWflVJ6VJxZf4t1HA3k43heVCkL81Rw7Z+HVLy+zPa3MuX2D23zraIkM8+JAppkdqWbaG7xJzDBTWKLQKlxLdJ0rQ4I2BXIKbXT5V67bvApMCucLrdQO/PP0B0lwJYQQfyE+Pj40aNCA/fv3YzKZsFqtFQ4NXq24uJgpU6awevVqgoKCGDBgACEhIZhMJtauXet2gnpVWa1WLBaLszesMqWlpRiNRry8vGjSpAm+vr5u0/n4+Nxwef6KwmtqeP4ePRN66tlz0szYTwo4e8nKWxsu086gpV4NDV/tK+HvnxVSZlFoFKKhZ7QWtRqOnbVw5KzrisJAnYp7WvhwINPMzrQynuis4+dkezDepak3gTr7dS4ssQdo3l4qQvQqt9ff30eF+jrt4o9GgishhPgL0el0hIeHA5Dy/+3cf2jUdRzH8df37rvdTqfntDtj5tDUJYY0V83KVChS1x9mUrb+yHRgQlDSMhBxSoSpmNTwr1wzgrQji8IhpIKUm8S5YMi8mLO6ZtR0mz/ybtvp3X2//bF145rV0C+tuefjz7s3fN733R978f58Pt+zZxWNRjVu3LiMmkQioS1btigYDGr58uXaunWrGhoadPDgQU2aNEk1NTUqLCyUJF2+fFkNDQ23FK7cbrdM05RlWf960+/PWo/Ho23btqmoqOim1x2p4glb73/TrZ86kyouyNKLj4xKf2cYUsnULL2+aLRe//SqOmOWfu5MKdfj0p7j3bqetLVsTo7eKxurLHdv4Hn3aJfCv8UGrLPoXo9q6rrVfD6pEz9cV/P5pMbmGCqd3R98s/qGUQXj3fr85TzdkTt8plP/
5Pb4FQCAQTEMQyUlJTJNUydPnlRjY+OAmra2NoVCIUlSUVGRPB6P2tvblUwm5ff75ff707WWZSmVurXbXBMnTtSUKVPU1tam5ubmjO8ikYhWrlypVatWqbW1NV3b1dWlY8eODVh7MAFtpDNdhhrPJfTZd3F9UNet9qv/fp6p+7qtzr5zT9MDZjpYSVLSuvHzvtvv1qx8Uxdjlj6o61Zn1NI9d5qaHuif69zXd2Ow9WJvAPur5PC7KCiJcAUAI05JSYmWLVumZDKpDRs2qLa2VleuXFEsFlNTU5M2btyoSCSiwsJCLViwQJJkmr3/ECORiE6dOiXLshSNRrVnzx41NTXdUj/5+flauHChJGnv3r06c+aMbNtWNBpVMBhUfX29bNuWz+dTfn6+lixZIkmqrq5WMBhUPB6Xbds6d+6cKioqtGPHDsXj8Vvq6XZmuqVnH8hRttl74Hxd8Hc1/ZpQImUrnrB19Ptr2nWkS5KUP86l6QFTLkNy9yWGupZrao9aSlnS4fA1fXSi54breLMMLe47uH4y0nu2qnR2jsbk9AezRbM8mjLBrURK2vxlVEfCva+ESKRsfdEY10Nvd+jjb3uG3W1BtgUBYITxer1av369Lly4oLq6Oq1bt25Ajd/v1+bNm1VQUCBJmjdvnubOnatQKKTy8vJ0nWma8nq96unpUUdHx03143a7tWbNGp0+fVqhUEilpaUDelm7dm16+3L16tVqbW1VbW2tKisrVVlZmVH/Z3DE33tydo5eeyKldw7HdLzluo63XBpQ4802tH5xrgJjXbJt6bkHvdp1JKZQJKE5b/b/rbP6juzFk7audFu609c/t5k3LVsTcl26GLOUN8qlR2dkZ6wxebxbbz09Rq/u792CXP1h5is0XIZ0IZqSZUvuYXTsiskVAIxAgUBA1dXVqqqqUnFxcXoyNXPmTFVUVOjQoUMZb28PBALavXu3ysvL5fP5ZJqm5s+fr/3792vp0qWSpJaWFvX03HiKMZh+ampqtGnTJk2dOlWS5PP5VFZWpgMHDmT0kpeXp507d2b0bpqmiouLVVVVpe3btys3N/dmH82IYBjSq4+P1tGKCVrxoFeBMf1xIDDGpedLvPr6jQl6qignXf/K46O1a8XYdHjyZht64WGv3ivzKcstXe6y9GNHZqidHjBV3Lf1N/suU9P8Ay9PPDbTo69eG69n7s9JH3TPNg3NKcjSJy/lqeKJ3PTUbLgwbDanAQD437k0ecaAz8b/cnYIOrl9/FfPdJhlQQAAgP83whUAAICDCFcAAAAOIlwBAAA4iHAFAADgIMIVAACAgwhXAAAADiJcAQAAOIhwBQAA4CDCFQAAgIMIVwAAAA4iXAEAADiIcAUAAOAgwhUAAICDCFcAAAAOMoe6AQAAMDiXJs8Y6hYwCEyuAAAAHES4AgAAcBDhCgAAwEGEKwAAAAcZtm3bQ90EAADA7YLJFQAAgIMIVwAAAA4iXAEAADiIcAUAAOAgwhUAAICDCFcAAAAOIlwBAAA4pL29nXAFAADglPr6ernC4fBQ9wEAADDshcNh7du3T38AmadTHZOh2JEAAAAASUVORK5CYII=) Step 1: Install dependencies for model evaluation 🔽<jupyter_code>!apt-get install -y \ libgl1-mesa-dev \ libgl1-mesa-glx \ libglew-dev \ libosmesa6-dev \ software-properties-common \ patchelf \ xvfb<jupyter_output><empty_output><jupyter_text>Step 2: Install and import the packages 📦<jupyter_code>!pip install gym==0.21.0 !pip install free-mujoco-py !pip install transformers !pip install datasets !pip install imageio-ffmpeg !pip install colabgymrender==1.0.2 !pip install xvfbwrapper !pip install imageio==2.4.1 !pip install imageio-ffmpeg !pip install huggingface_hub import os import random from dataclasses import dataclass import numpy as np import torch from datasets import load_dataset from transformers import DecisionTransformerConfig, DecisionTransformerModel, Trainer, TrainingArguments<jupyter_output><empty_output><jupyter_text>Step 3: Loading the dataset from the 🤗 Hub and instantiating the modelWe host a number of Offline RL Datasets on the hub. 
Today we will be training with the halfcheetah “expert” dataset, hosted here on hub.First we need to import the load_dataset function from the 🤗 datasets package and download the dataset to our machine.<jupyter_code>os.environ["WANDB_DISABLED"] = "true" # we diable weights and biases logging for this tutorial dataset = load_dataset("edbeeching/decision_transformer_gym_replay", "halfcheetah-expert-v2")<jupyter_output><empty_output><jupyter_text>Step 4: Defining a custom DataCollator for the transformers Trainer class<jupyter_code>@dataclass class DecisionTransformerGymDataCollator: return_tensors: str = "pt" max_len: int = 20 #subsets of the episode we use for training state_dim: int = 17 # size of state space act_dim: int = 6 # size of action space max_ep_len: int = 1000 # max episode length in the dataset scale: float = 1000.0 # normalization of rewards/returns state_mean: np.array = None # to store state means state_std: np.array = None # to store state stds p_sample: np.array = None # a distribution to take account trajectory lengths n_traj: int = 0 # to store the number of trajectories in the dataset def __init__(self, dataset) -> None: self.act_dim = len(dataset[0]["actions"][0]) self.state_dim = len(dataset[0]["observations"][0]) self.dataset = dataset # calculate dataset stats for normalization of states states = [] traj_lens = [] for obs in dataset["observations"]: states.extend(obs) traj_lens.append(len(obs)) self.n_traj = len(traj_lens) states = np.vstack(states) self.state_mean, self.state_std = np.mean(states, axis=0), np.std(states, axis=0) + 1e-6 traj_lens = np.array(traj_lens) self.p_sample = traj_lens / sum(traj_lens) def _discount_cumsum(self, x, gamma): discount_cumsum = np.zeros_like(x) discount_cumsum[-1] = x[-1] for t in reversed(range(x.shape[0] - 1)): discount_cumsum[t] = x[t] + gamma * discount_cumsum[t + 1] return discount_cumsum def __call__(self, features): batch_size = len(features) # this is a bit of a hack to be able to sample of a non-uniform distribution batch_inds = np.random.choice( np.arange(self.n_traj), size=batch_size, replace=True, p=self.p_sample, # reweights so we sample according to timesteps ) # a batch of dataset features s, a, r, d, rtg, timesteps, mask = [], [], [], [], [], [], [] for ind in batch_inds: # for feature in features: feature = self.dataset[int(ind)] si = random.randint(0, len(feature["rewards"]) - 1) # get sequences from dataset s.append(np.array(feature["observations"][si : si + self.max_len]).reshape(1, -1, self.state_dim)) a.append(np.array(feature["actions"][si : si + self.max_len]).reshape(1, -1, self.act_dim)) r.append(np.array(feature["rewards"][si : si + self.max_len]).reshape(1, -1, 1)) d.append(np.array(feature["dones"][si : si + self.max_len]).reshape(1, -1)) timesteps.append(np.arange(si, si + s[-1].shape[1]).reshape(1, -1)) timesteps[-1][timesteps[-1] >= self.max_ep_len] = self.max_ep_len - 1 # padding cutoff rtg.append( self._discount_cumsum(np.array(feature["rewards"][si:]), gamma=1.0)[ : s[-1].shape[1] # TODO check the +1 removed here ].reshape(1, -1, 1) ) if rtg[-1].shape[1] < s[-1].shape[1]: print("if true") rtg[-1] = np.concatenate([rtg[-1], np.zeros((1, 1, 1))], axis=1) # padding and state + reward normalization tlen = s[-1].shape[1] s[-1] = np.concatenate([np.zeros((1, self.max_len - tlen, self.state_dim)), s[-1]], axis=1) s[-1] = (s[-1] - self.state_mean) / self.state_std a[-1] = np.concatenate( [np.ones((1, self.max_len - tlen, self.act_dim)) * -10.0, a[-1]], axis=1, ) r[-1] = np.concatenate([np.zeros((1, 
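            # Note on the padding scheme used throughout this collator: every modality is
            # left-padded up to max_len: states with zeros (which are then normalized),
            # actions with -10.0, rewards and returns-to-go with zeros (returns are also
            # divided by self.scale), dones with 2, and the attention mask is 0 on the
            # padded positions and 1 on the real ones.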
self.max_len - tlen, 1)), r[-1]], axis=1) d[-1] = np.concatenate([np.ones((1, self.max_len - tlen)) * 2, d[-1]], axis=1) rtg[-1] = np.concatenate([np.zeros((1, self.max_len - tlen, 1)), rtg[-1]], axis=1) / self.scale timesteps[-1] = np.concatenate([np.zeros((1, self.max_len - tlen)), timesteps[-1]], axis=1) mask.append(np.concatenate([np.zeros((1, self.max_len - tlen)), np.ones((1, tlen))], axis=1)) s = torch.from_numpy(np.concatenate(s, axis=0)).float() a = torch.from_numpy(np.concatenate(a, axis=0)).float() r = torch.from_numpy(np.concatenate(r, axis=0)).float() d = torch.from_numpy(np.concatenate(d, axis=0)) rtg = torch.from_numpy(np.concatenate(rtg, axis=0)).float() timesteps = torch.from_numpy(np.concatenate(timesteps, axis=0)).long() mask = torch.from_numpy(np.concatenate(mask, axis=0)).float() return { "states": s, "actions": a, "rewards": r, "returns_to_go": rtg, "timesteps": timesteps, "attention_mask": mask, }<jupyter_output><empty_output><jupyter_text>Step 5: Extending the Decision Transformer Model to include a loss functionIn order to train the model with the 🤗 trainer class, we first need to ensure the dictionary it returns contains a loss, in this case L-2 norm of the models action predictions and the targets.<jupyter_code>class TrainableDT(DecisionTransformerModel): def __init__(self, config): super().__init__(config) def forward(self, **kwargs): output = super().forward(**kwargs) # add the DT loss action_preds = output[1] action_targets = kwargs["actions"] attention_mask = kwargs["attention_mask"] act_dim = action_preds.shape[2] action_preds = action_preds.reshape(-1, act_dim)[attention_mask.reshape(-1) > 0] action_targets = action_targets.reshape(-1, act_dim)[attention_mask.reshape(-1) > 0] loss = torch.mean((action_preds - action_targets) ** 2) return {"loss": loss} def original_forward(self, **kwargs): return super().forward(**kwargs) collator = DecisionTransformerGymDataCollator(dataset["train"]) config = DecisionTransformerConfig(state_dim=collator.state_dim, act_dim=collator.act_dim) model = TrainableDT(config)<jupyter_output><empty_output><jupyter_text>Step 6: Defining the training hyperparameters and training the modelHere, we define the training hyperparameters and our Trainer class that we'll use to train our Decision Transformer model.This step takes about an hour, so you may leave it running. Note the authors train for at least 3 hours, so the results presented here are not as performant as the models hosted on the 🤗 hub.<jupyter_code>training_args = TrainingArguments( output_dir="output/", remove_unused_columns=False, num_train_epochs=120, per_device_train_batch_size=64, learning_rate=1e-4, weight_decay=1e-4, warmup_ratio=0.1, optim="adamw_torch", max_grad_norm=0.25, ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"], data_collator=collator, ) trainer.train()<jupyter_output><empty_output><jupyter_text>Step 7: Visualize the performance of the agent<jupyter_code>import mujoco_py import gym from colabgymrender.recorder import Recorder # Function that gets an action from the model using autoregressive prediction with a window of the previous 20 timesteps. 
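# It crops (or left-pads) the state/action/return-to-go/timestep tensors to the model's
# context window (model.config.max_length), builds the matching attention mask with zeros
# on the padded positions, and returns only the action predicted for the latest timestep.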
def get_action(model, states, actions, rewards, returns_to_go, timesteps): # This implementation does not condition on past rewards states = states.reshape(1, -1, model.config.state_dim) actions = actions.reshape(1, -1, model.config.act_dim) returns_to_go = returns_to_go.reshape(1, -1, 1) timesteps = timesteps.reshape(1, -1) states = states[:, -model.config.max_length :] actions = actions[:, -model.config.max_length :] returns_to_go = returns_to_go[:, -model.config.max_length :] timesteps = timesteps[:, -model.config.max_length :] padding = model.config.max_length - states.shape[1] # pad all tokens to sequence length attention_mask = torch.cat([torch.zeros(padding), torch.ones(states.shape[1])]) attention_mask = attention_mask.to(dtype=torch.long).reshape(1, -1) states = torch.cat([torch.zeros((1, padding, model.config.state_dim)), states], dim=1).float() actions = torch.cat([torch.zeros((1, padding, model.config.act_dim)), actions], dim=1).float() returns_to_go = torch.cat([torch.zeros((1, padding, 1)), returns_to_go], dim=1).float() timesteps = torch.cat([torch.zeros((1, padding), dtype=torch.long), timesteps], dim=1) state_preds, action_preds, return_preds = model.original_forward( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) return action_preds[0, -1] # build the environment directory = './video' model = model.to("cpu") env = gym.make("HalfCheetah-v3") env = Recorder(env, directory, fps=30) max_ep_len = 1000 device = "cpu" scale = 1000.0 # normalization for rewards/returns TARGET_RETURN = 12000 / scale # evaluation is conditioned on a return of 12000, scaled accordingly state_mean = collator.state_mean.astype(np.float32) state_std = collator.state_std.astype(np.float32) print(state_mean) state_dim = env.observation_space.shape[0] act_dim = env.action_space.shape[0] # Create the decision transformer model state_mean = torch.from_numpy(state_mean).to(device=device) state_std = torch.from_numpy(state_std).to(device=device) # Interact with the environment and create a video episode_return, episode_length = 0, 0 state = env.reset() target_return = torch.tensor(TARGET_RETURN, device=device, dtype=torch.float32).reshape(1, 1) states = torch.from_numpy(state).reshape(1, state_dim).to(device=device, dtype=torch.float32) actions = torch.zeros((0, act_dim), device=device, dtype=torch.float32) rewards = torch.zeros(0, device=device, dtype=torch.float32) timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1) for t in range(max_ep_len): actions = torch.cat([actions, torch.zeros((1, act_dim), device=device)], dim=0) rewards = torch.cat([rewards, torch.zeros(1, device=device)]) action = get_action( model, (states - state_mean) / state_std, actions, rewards, target_return, timesteps, ) actions[-1] = action action = action.detach().cpu().numpy() state, reward, done, _ = env.step(action) cur_state = torch.from_numpy(state).to(device=device).reshape(1, state_dim) states = torch.cat([states, cur_state], dim=0) rewards[-1] = reward pred_return = target_return[0, -1] - (reward / scale) target_return = torch.cat([target_return, pred_return.reshape(1, 1)], dim=1) timesteps = torch.cat([timesteps, torch.ones((1, 1), device=device, dtype=torch.long) * (t + 1)], dim=1) episode_return += reward episode_length += 1 if done: break # Play the video env.play() # If you want to convert the video: # !ffmpeg -i {your_video} -vcodec h264 replay.mp4<jupyter_output><empty_output><jupyter_text>Step 8: 
Publish our trained model on the Hub 🔥Now that we saw we got good results after the training, we can publish our trained model on the hub 🤗 with one line of code.Under the hood, the Hub uses git-based repositories (don't worry if you don't know what git is), which means you can update the model with new versions as you experiment and improve your agent. To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role** ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAagAAAE5CAYAAADFiLQmAAAgAElEQVR4nOydeWBU1dn/P3dmkskeIEBYJcguIQGhEDdcC7K8bqDFFQT11brUV9TaWoWotS3V/mpBRapsKi6Au1VAK7uABGQREGQNayAJWWcmmZnz+2Myk1mTzJJkgOfTxofnLme7957vfe4594723vvvq1GjRmE0xtEQNEC5+xoo5W41lFJ+fF/rtWNNyn58l3VSn998+LQPvrWo0zqrH3ICdVhngn4zaIwMfQvg8Lz9MKoTLI1Z3QhQf/E0FMqP72tDr0AQFWqK9nTzw8kuhNqFQIQbxF8/GdA6XW/f5TpS9fabtH2CQ7NUVav6N2s+GtyhN8D6KIB3AvUKZBNc8JFun2BL28QdTuAaRaZAdXXgvta5l7cfwaPXzO3b+Nk3h2L5UbAw9cA7eX8devNf/aEQ5PHxaJDaAMO5vK4AxMc6U3EGKDXU1T4+AuUqgJffEOs8kpGPkKKHcC4/5w4Ri5DqTDASGQR/fBpdvwJl2JjVjSDBRECBIqKGR0hOgqhQ854+TX/+1EuEG8Srww8qxPHePMjdG6d9Gpeoj6C8Cfp0qeOA+tdLtw7fO4F6BTZSl1Top1AkLye/pWnmDqzhNfZfQGeH7+2HEmGFVNxQq9PEAhGJ7P1n15gVbEAJIqkvzsWB9aRBpYtu6jlejRxhBS1Qvv138GNO50KEFfLl5Na+kUnQTwbN2EFEjX5Fsnph4Fu8QBGWtx+JCKuZFSwIPYlEdg2oXQQIs8RBRlh1bl7/7lHfu0ZdBFXv4fURyMAHxLmBU0AbuEMdAtn8l0BjXLCNnkETKkpwEZJzqzN8zKlJi1MrmI1fQT8Vdvaw3goWhclHJ/Ucr3oUr+4Iybk4gE9t+2oNbCCtqqpaeRQ3iIio3gjJp8N3Et2H0J2wLz9n9d0SCE///CTYaB1C/YrT5HrWFB1+BE/PWsH09H07/EiMQUWgAk3cvtF3+BqzAbw7fOfiwH44EdGZIZh1E3URVLAEfTq59e8evj89rXMHtxyjeNZfo3cATdGhhVTjhhUouDGoho1JhUWUCXDjH85GFgTvEjh7dA8/dBuOYJyZAlLP8QowJhWSJQSB8imej7LLe1BhXW5+9C+yPUSgDBojQ98CODt0Tz9q9avJe5j6iyfvQYWaXQi1C4EIN0idIZSvQjofqfmNwJypBhDQpmmf4Ij6CKrBHXoDrI8CeCdQr0A2wQUf6fYJtrRN3OEErlFkCiTvQTV19s2hWH4ULEw98E4+nIgomjr8oI9PgIjI/xiUvAfV7IRz+Tl3iOwsvUAJRiKD4I9Po+tXoAwbs7oRRN6Dqrs4TX7+1EuEG6TOSQjOxYEVMZwxqcZpn8Yl6iMob4I+Xeo4oP710q3D906gXoGN1CUV+ikUycvJb2mauQNreI39F1Deg6q7wI1+/jRqBRtQgkjqi3NxYD1pUOmim3qOVyNHWPIeVIQJ+3Jya9/IJOgng2bsIKJGvyJZvTDwLZ68B+VPTyKRXQNqFwHCLHGQEVadm9e/e9T3rlEXQdV7eH0EMvABcW4g70EF0b82e48QXgHkPajGLo68B9WIh6cJqOd41aN48h5UlBH25eesvlsC4emfnwQbrUOoX3GaXM+aosOP4Okp70HVXbzoO3yN2QDeHb5zcWA/nIjozBDMuom6CCpYgj6d3Pp3D9+fnta5g1uOUTzrr9E7gKbo0EKqccMKJO9BNaw4jXc4G1kQvEvg7NE9/NBtOIJxZgpIPccrwJhUqO9B6UIpnoeveVstgO9rNW+rBfBdVvPx8evXYQngo9UU2tvHtdzHx913tI7m0Uq1fsOsMynNlbqmhWdxtzUrfHzNn+9r8edTV4a+BdD8+gEsdVv///Ne67nX5k15jBwxkldnzHCVJXSLhx+JP2ea3r53Xr6+r9W8LQH8gG1W/zHwOVZ1HFP8WgL4/s+xQOdiQ2xtcp5+OBZ3W/NfH7+BFn/Wt4Otwzr7Lm+/nj7Qo2/19QP1vX77aDx9CMMSgkB5h1vO54MHDhzkySefILPvBbROa8XYMWNZsXw5NputZjvlY5W3Vb4+Hlb5+tT4KKqrqykuLqKysrJmufKwrsL69d1q5+0HYZVXKymP5Q1IQdX6NdWs0xKMdbabt6/cMiScDOqxKEdLePselnr98vIyioqLsdltAVPx3ttpzWYzy5YtpaTktKsstZYAfkMOXAP/vLf1SsM7L9+mDFRWt7qqwG1CAL/uNq/Dqlrfz8kW3jnjfQ66klVe2akGZeeTnB+LZ/IuP9Ah97XuD3Q92zeYVFzWu4+q0zr7Om/fq+/09t37Xm+f2uX++m6Csc6aeQwq4dVenvgIlPsLVO5+IAuw5Ouvueaaq3nln/8ENAYOHMjq1asYM+YmFi1c6KOqPn5AVfZVeR/rdpewbds2+mdn8+knn3gsb1hEBD53LX7uarzvduq+K/K8i/K463LzQ7bOu0PcquO6a/T267d13r3Weddba+u8iw50143n3bu37+5VV1WRO3Uqd9x+O6eLT9cU0090UGPxtm74rsfT96peJP580vZXxgDWu07edfVpAz9tGGhNwGNT3zF1s3WfK3WcY2627nPUzzlOrd+A5Ou1uPvOs8S5vPasaZDFnx+or6mzj6q13n1a8BESPn6dllp/3rx5NUWrXY6XXb58ud/lHi3iagOPlvCLwXuBj7rVo4q7du3iT396GhR88umnDB9+LTqdjlMnT/LZ558zavRo111OTYrOhD19Hx31r6ua1xqnrwEmk4ljx4653dk5jmugQUbnjg0
bc3IsUPjxg7Aod99ZGefyQH4d1tWezvbwvIOrndMWvHUm7OG7DoCX71m90KxblTx8N6w2KxXOCJnaO8LafZ0JevueZ07NfSWa0uq2RPY9KN/7ac/7bqWpmuJ7+TWWAH5gS63fgAI7d/P2Q7HOvDyz9zo+XidQsNeTz/Xl4fupsNf5Wr/1bEdNqznnaiqk1ZyDmtbA60p5XU8+kRAB/ACbB7e7R4s0DEfFly9fzt13382BAweYMmWKe4PgHHOaO3cOEydOZN++fWR07QohjDlp4PIhlDEot+Nss9n48IMP2Lp1K7979Hdce+0I9Ho9AG3atmXixIkkJSUBsHnzZkaPGsmGDRv47LNPGXjhhQwf9msKCgrQgK1bt3LbbbfSOq0V7dulM+XZZykuLnKp9+H8fP7yl7+QmZmJMTaGHj26M2PGdExmMxaLhalTp/J/j/4OgH/+v//HqFEjyZ06FbPZgqZpbNniSD+tlSP9Z595lqLiYtzvQtzvXvIP5fPiX/5CZt++xMbG0L17N6ZPn47JZPK9C0LDbDYzdcpUfv/kk+zfv59nn3mGdunptEtvyz//+U9sVivOuymlYPl333Ht8GHExhjo3r07M6b/C7PJhKbBG2+8waiRI1i2dKnrbm7ZsqWMGjmC116d4brbO7B/H7ffdit/++tfsFqrfe4ei4oKee21V7n0kouJiTHQLr0tzzz7DMXFRW43ZRqVlRVMnz6dzMy+xMQYuPDCAXzw4YdYbVbQNA4dPMADD9xPenpbYgwGbr55LFu2bsGZ4ZatW7h13DhatWpJeltHHkVFxa67vuMnjvPggw+69r/hhutZsXwFduXo6H/++WfH/i1b0qplSyZOvJttW7fW3CfU3BGisXnTJq77n+tYvGgRGzduZNxvxjFyxAg2b9qEhobZZGbWG29w4YABxBgMXHLxJXz44QfYrDZXKu6XnTMaWbN6Dddd9z888vDDnDx1Eg2NouIipjz7LN26nU+MwcDYsWPYuuXHmvbV2Ld/H7feOo53F7zLj5s3MXbsGAwGPQMG9Gf1mtXODOr9847OAkdY3r6vDT7C8ra1rRSJCAu/Pp4V9z5pvXzNfXm9EVbdEVfjRFiefaL7WebpB46wPGyQEVag8fPQIyxvHzRN48orr2TKlCnk5uaSm5vrE2HNmzeXiRMnMmfOHLp27epaTjDW1Z7Of/mJoOrDXZlLS0vZtHkT3bv34PLLr3CpoWM7T2symVi2bBkdO3Zi9erVtGuXzq8GDyYlJYU1a9cy/q67SElJ5qGHHqLSZGLWrDcwW8w8/9zzKGDGjOnMmzePMWPGcNOYm/ji88+Z/NhjtE5L44Ybb8L/PYFj2Zo1axg//i5SkpN56KGHMZkqmTXrDSwWM889/wLG2FjcQyqL2cyMGTOYN28uY8aMZcyYm/j88y9q8mvNrbfdWtOcbndsSnHy5EmWLF3Ct99+g06nIysri61bt/L8c8+RlZXNVVddhQIWfvghDzxwP9nZ2Uydmsuh/ENMmTIFBTz00MN069adZcuWccmll3HNr38NwA8/bGTZsmWkp6dTWlZGcnIyv+zdx8KFCxkxciQxhhiPGz7sinfffZepU6YwctQoRo4cxTffLOOvf/kLiYmJPPH4E+j1eioqynnqqaeYOfN1Lr30UsaMGcOqlSs5sH8fNquNX/bsYdKkSezcuZNRo0bRoWNHli5ZSlFREcqu+H7dWu66806SU1IcbWs2MeuNN7BYLDz//PMA/O2vf+O9BQuYcPfdtG3ThlWrVrF6zWpyLsqhpLKSP/3paTZv3sxvH3yQ+Ph4vvj8c3788Uf6ZmaiND3OO2LfI1wbjVSaTDz11O957dVXufTSS/nDH//IsqVLuf222zj44os89thk9Aa9z94njh1n2rS/cepUIX/72zTatG5DwcmT/PaB+1m5ciVjx46lY8eOfP75Fzz88CPMnz+fjIwMqquq2b17N/94+WUqKytJT2/HkCE5rF+/juefe565c+fSvn372tPEiedpUxvYuXzlsTyw7xV5ed0rBxr11PCM4Xzv9AngOwMKr/eg6otYg7VeDeMZGTkX15YgzOTxSp6a5D39oKxyT86n/eu1gZ40BQiR6t3cZ3dnf+XlO2tQExF5+2haTeQEubm5AEyZMgVN05gzxxE5zZ49hwkTJrhFRs7dQ38PiqqqamVx+6uqttZYb9/X7ti5S1144YXq6quvVkeOHlNVXuurqmtsVbVavmKlAtR5XbqoJUuXurYpPl2i7ho/XmVlZaut27a59p8+Y4bq3qOHytu0WVVVW9WRo8fUseMnatK1qjVrv1ft27dX99//gCotK1dV1VY1b958Bah58+a7tmto+t5/zvyqA+TnXO60paVl6t5771PJycnqjVmzlMlkVpaqajVjxgwFqBdeeEFVV1vVofx8dfXVV6uRI0eq/PzDqtpqVSaTWT3+xBPq8ssvVwcPHVJ79+5VF110kbrjjjtUUXGxOnWqUI0ZM0bdcsstKis7W/3000+q2mpVL730kmrfvr1av2GDqrbaVLXV6mFPFRaq/MNHXP6ePXvU4MFD1MiRI1XByVOq2mpVs2b9WwHq/gceUKdLSlS11abMFosyW6rU6ZISNX78eJWcnKzmv/2Oo75Wm6o0mVRVtVWdLilV48ePV1nZ2Wrbtu2OvKutasaMV1WPHj3Ups2b1YkTBWrYsOFqwoQJ6nRJqbJabcpktiiTyayqrTb10087VFZ2tnr66adVVVW1slptqtJkVmazRVmtNp+/EwWO9IYNG65OnChQVptj+dJly1RycrK68847VVFRsbLabOrw4SNq5MiRqkuXLiovL09ZrTa1cqXjPMzNzVUlJaXqgQceUF26dFH//e47R1o2m6tN5s2fr6qtVmW12dTGjRtVjx491Guvv66sNpv6accOlZ2drQYPGaI2bNigrDabKiwqUrfddptKTk5Wa9asUVabTdlsdle67r7Nx6/f2gL4tdYewA/izx7Aj4S1+/r2CFp7AD+4PxXAj5BV7lbV66u6rArgB7CRYsqUKQpQU6ZMUXPmzFGAmjNnToRzcWDweTbppcwqQESklMJms2G1WtHp9S6FxGs71x1djf8///M/XHTRxa5lx44dY/u2beTkDCE1NZVTp04BkNElg1/27OHAgf1kZmbSunVr7HY7R48eIS8vj7Vr16KAffv2Ul5ejtFo9LybrHnWe9wr/cLCUyhVm/7BmvS9x6TatGmN3Wbn6NGj5OVtZO3atVCTX0V5OXFxxto7EFWbc9eu53PJJZegNxgARWa/LACsVhsKxcGDh9iwYQOTH38CQ4yBU6cKAUW3bt149513OHbsOH369CYzsx/bt2+jqKgIs9nM4cOHue9//5cdO3Zy8FA+HTt1ZsfOnWRlZdG583k+7Q2KlJRUUpJTOHWqkM2bNrFhwwZKS0sxm82cLDhBXFwcmzZtIjk5mVvH3UpCQiIohU6nRwOOHTvGtm3buPjii/n1Ndc47gqVwmCIQQOOHzvKtm3byBmS42jbU6dAg4yMDPbs2cOB/Qc4//
zzad++HYsXL6ZVqzTuv/9/6dIlA51BD0qRnJJMm9atef3110lOTub2O+4gvW06ms57NM39fKw9zs56r127lrKyMq759TCSU5JRSpHeLp2rrrqa//znP2zduo2s7GxXipaqKl59dQavv/468+bN47JLL0MphcVsYdPmTVx44UD69u1LUWERCkVSUjKdO3dm988/Y7FYXEFD/+xsevfpAwpSU1Lp1asXCxYscMxeVfjUofHHoAjgexTC08drnRvhjEF5W3/51N7Be/uhWQL4DQ+RArWjf+sck/IYg/L23a3ytrh8j/b36YcDWAL4AWz4+EZSs2fPZvx4R+TkPiYF4b8HFfQjPvcnFmlpabRr146jR49SWHiKtLQ0twycoZz7lyWgTes2GI1Gh68UhYWF7Nmzh02bNjFr1iyf/MwmM5oGW7ZsZeqUZ/nyyy/p378/3bp393gGrLmejeN65q0Bp+pJ32Qy1z7zdHu8u2XLFp/8ahtBq93Y5eO5rLYkHv6hgwcpKytj6pRnmTrlWY+1ycnJWK3VxMcn0KtXT/7971kcOnSI08WniYuLY/CvfkWvXj35YcMGLrigD3t/+YX+AwaQmpLiIbBOm38onylTnuXtt9+mZ69eZPXr51FEs9nEoUMH6Xr++bRunVZTjdrpAM5jM3DQIBKTksBrvWfbvuHbtmYTiYlJTJk6Fbvdzj/+8TL/+MfLjBo1ir/9bRq9evemQ/sO/G3aNKY8+yxPPfUUTz31FHfffTfPP/8C6e3a+elmatveOU5iNps5cfw4AF0zuni0etu2bQE4dOggmtuaN//9b8d4Io6JPja7DYPegMls4tDBg2zalMeggQN96tSrVy+UXYHm9szc3fE83EGjgccEEadfazWUpvz4vtahCm6WAH49CuZ9dntYZ3Lefh3Z+1rNv+/Myc2v6fZcJXD6DbGu2mle12WdFazf+oxBuSZQOMuoGmT9NpDbI7b6rdP19l0uTn328N2qhB/fF42MjAyXd/DgQVff4dEgzhYIdizKbUwq+DEot38nJyfTu08fli5dyo4dO+jZs1f9ERiefosWLejatStZWdk89/xzxMXFe+SXkpzMoUOHmPzY/2E2m1m79nsuHDiQX/bs4bbbbnXLS+F5N+q4k2nZsib97Gyef+454uITcJ+ll5ySUnvEag5N/qGDtfl9v46BAy9kz5493HbrrV6t4H6IvVvH/5KOnToB8MQTT/LY5Mke63Q6HcnJyQAM+tVgkpOT2blzJydPnqJnz1506NiJ7t17sHvPbnbv3sPu3buZOHESMbGxjrtqau/IykpK+f1Tv2fD+vV8+ulnXHvttRQVFzP+rjs5fvwEAEZjHB07dmLz5s2Ul1d4JoCiZYsWdD3/fIoKC6mqshAfF4f7HaRzfXZWFs89/zzxcfEeZ3xyUjKg6NypM2/Nns2UKVOYM3cu/3rlFX73u0eYUzNOk52dzUcff8zOnTt5/fXXeWPmTOx2O6/8618kJia5nTW4jjBoriNujDOS3q4dAKdqIh5nQcrLywFo06Yt7tFMQUEB99xzD3FxccydO5errrqaK668kjhjHB07daJPnz78+8036dG9h8cFazQaiYuL8z2w/m5R/S3TvJZ79UeBxkqCGpNy873PzbDHoLytqvUdyToL5u2HaD0awvP41ybf8DGpQO3srQeuvpbAHbp/6zsG5Vzu73jUa+sNoTxt7YMU737YmaqjpAHHoLx9PxFR7ZjTbA4ePOgzJlVXRORoz9qABbdc/eEjUO4THdx9fzY2NpaRI0cxZ/ZsXn7pJfr07kPvPn1cj/++WbaUARcOJL1tWzdVrLl7qRGEdu3a0b1HDzZu3Ehx8Wn69evsU+TDh4+wcuVKpk7NZdCvfgVAtdWK3a5cZXRLmYqKCoevQXp6Tfo/bKSo+DT9OnUO0BS48sx3z2/QIEd+1bX5ud9Ced6haR4peTe8BnTu1JGLLrqIH37YgMVspmOnjn7P9K5dM7jwwgv5cfNmSkpKGDx4MCnJyWRlZ7Fs2VKWL/8OgN59etfeubrKpDh+/Bg///wzw4YP58qrrkSn12OzWbFaba670fj4OHr27MmxY8f4esnXZGVnYTDEoLBjtdpo07Yt53ftytq1a9m0aTNXXnklADa7Y0Zierv29OjenR82bqS4uJhO/Tr7XLAoRUlpKS1SU+mS0ZWnfv8UBQUFbFi/nrLSMtq2aYvJbCY5KYnMvpk8l/sc+/ft59ix41jMFpISk3zaFTQsFgtVlqqa00gjOysbgO/++y3Dhw3DGGektKSM779fS/v27RkwoL/HffQ999zDyy//g6PHjrJhww+8+uoMMjMzadO2Db169uLfs2axfft2hgwZ4nExeRWk9t/+rjC/u7lFQB5+/dYZijQ8QnLi6XsXy+N+17m7t98AW3vgNf++MydvP8D15Gvx72veEVLA5BtkPQ93+BGRh+/V4QcV4nhvHuTurvbBs70a3iAac+fWztabMGGCKwWnSE2dOjWoWXruy/2hf+aZZ6fWsb5eOnfujMFg4O235/PBBx+wf/8+ftzyI3/9y4u8+OKLpKakctHFF3P48GHmzZ3L5VdcwWWXDXXtbzQaMRpjefvt+axYsZxYo5GiwiIWLVrEiuXLybnoIioqKlixfDm7d++mRYtU8jZtYsqzz/Djjz/SrVs3brjhRhLi4yktK+OjxYvZu3cvLVqkUlBQQI+ePYiLi3Olb4wzUlRYyKLFjvQvuvgiNE3nFkBplJWWstyVXws2bdrEs2753XjDjSQkJOB+ClRbrSxduoQjR44wZuwY0tJaA4r8/MPMnTuHK664ksuGXkZKSirlFeXMeuMNNm/eTGJiEkeOHmHunLkcOHiA7Oz+oDmimx0/7WDRokUcOHCA3z36f3Tu3Bm73c67777LhvXrycrK4q7xE2rKUnuwFVBdVc2yZUv54YcfSE5O5sD+A0ydOoVvvvmG9PR2jK0pY4f27di4cSOffPwxhw8fobqqipmvz2TZsmVcO/xakpKS+fDDD1m1chWVFeUcOXKEPzz1FDGGGAYOHEis0cjb8+ezYsUK4oyxFBYVsWjhQpavWMFFORexe/fP3Dx2LHv37UNDY9PmTXz4wYd06XIeY8fezMa8jdxy882Ul5dRba1m9apVfPrppwwcNJBRo0dhiPG6h9Jg/Yb1fPTRYuxKYbXaHELbqyfHjh7l/fff50RBAWazmZf+/ncWLFjAvffdx7hbb0Nv0HM4P585c+Zw3fXXc/XVV9OqZStA8dJLL9GmTWuGDMmhQ4f2bNyYxzvvvI3ZbMZmt7N2zVpmzJjOhRcOJDk5mcJThSxevJhOnToxbNhwYgwxAKxatdL1zkjnzucFf0G5lN3Lj4R1UsctaySz959dY1awASUIM9nwI6y6I4boo/Z4OcVp9uzZTJhwN84GueLyy4Fakbr88stdDRXue1BYvGbx1ffnmt3nNuuvtKxcvf322yozM1PV1Ea1atVKvfDCn1VhUbGyuM3imzo1t3aWn3PWn6VKLVm6TA0aNMi1/3lduqi/v/SSKj5do
<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`3️⃣ We're now ready to push our trained model to the 🤗 Hub 🔥 !!<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output>
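<jupyter_text>Once the upload finishes, you can load the checkpoint back from the Hub for evaluation. The cell below is only a minimal sketch: the repository id is a placeholder you need to replace with the one `push_to_hub` actually created, and it assumes the checkpoint is a standard Decision Transformer.<jupyter_code>from transformers import DecisionTransformerModel loaded_model = DecisionTransformerModel.from_pretrained("your-username/your-repo-name")  # placeholder repo id, replace with your own<jupyter_output><empty_output>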
blog/notebooks/101_train-decision-transformers.ipynb/0
{ "file_path": "blog/notebooks/101_train-decision-transformers.ipynb", "repo_id": "blog", "token_count": 165600 }
40
<jupyter_start><jupyter_text>Train and Fine-Tune Sentence Transformers Models - Notebook Companion<jupyter_code>%%capture !pip install sentence-transformers<jupyter_output><empty_output><jupyter_text>How Sentence Transformers models work<jupyter_code>from sentence_transformers import SentenceTransformer, models ## Step 1: use an existing language model word_embedding_model = models.Transformer('distilroberta-base') ## Step 2: use a pool function over the token embeddings pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension()) ## Join steps 1 and 2 using the modules argument model = SentenceTransformer(modules=[word_embedding_model, pooling_model])<jupyter_output><empty_output><jupyter_text>How to prepare your dataset for training a Sentence Transformers model<jupyter_code>%%capture !pip install datasets from datasets import load_dataset dataset_id = "embedding-data/QQP_triplets" # dataset_id = "embedding-data/sentence-compression" dataset = load_dataset(dataset_id) print(f"- The {dataset_id} dataset has {dataset['train'].num_rows} examples.") print(f"- Each example is a {type(dataset['train'][0])} with a {type(dataset['train'][0]['set'])} as value.") print(f"- Examples look like this: {dataset['train'][0]}")<jupyter_output>- The embedding-data/QQP_triplets dataset has 101762 examples. - Each example is a <class 'dict'> with a <class 'dict'> as value. - Examples look like this: {'set': {'query': 'Why in India do we not have one on one political debate as in USA?', 'pos': ['Why cant we have a public debate between politicians in India like the one in US?'], 'neg': ['Can people on Quora stop India Pakistan debate? We are sick and tired seeing this everyday in bulk?', 'Why do politicians, instead of having a decent debate on issues going in and around the world, end up fighting always?', 'Can educated politicians make a difference in India?', 'What are some unusual aspects about politics and government in India?', 'What is debate?', 'Why does civic public communication and discourse seem so hollow in modern India?', 'What is a Parliamentary debate?', "Why do we always have two candidates at the U.S. presidential debate. yet the ballot has about 7 candidates? Isn't that a misrepresentation of democracy?", 'Wh[...]<jupyter_text>Convert the examples into `InputExample`s. 
It might take around 10 seconds in Google Colab.<jupyter_code>from sentence_transformers import InputExample train_examples = [] train_data = dataset['train']['set'] # For agility we only use 1/2 of our available data n_examples = dataset['train'].num_rows // 2 for i in range(n_examples): example = train_data[i] train_examples.append(InputExample(texts=[example['query'], example['pos'][0], example['neg'][0]])) print(f"We have a {type(train_examples)} of length {len(train_examples)} containing {type(train_examples[0])}'s.")<jupyter_output>We have a <class 'list'> of length 50881 containing <class 'sentence_transformers.readers.InputExample.InputExample'>'s.<jupyter_text>We wrap our training dataset into a PyTorch `DataLoader` to shuffle examples and get batch sizes.<jupyter_code>from torch.utils.data import DataLoader train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)<jupyter_output><empty_output><jupyter_text>Loss functions for training a Sentence Transformers model<jupyter_code>from sentence_transformers import losses train_loss = losses.TripletLoss(model=model)<jupyter_output><empty_output><jupyter_text>How to train a Sentence Transformer model<jupyter_code>num_epochs = 10 warmup_steps = int(len(train_dataloader) * num_epochs * 0.1) #10% of train data<jupyter_output><empty_output><jupyter_text>Training takes around 45 minutes with a Google Colab Pro account. Decrease the number of epochs and examples if you are using a free account or no GPU.<jupyter_code>model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=num_epochs, warmup_steps=warmup_steps)<jupyter_output><empty_output><jupyter_text>How to share a Sentence Transformers model to the Hugging Face Hub<jupyter_code>!huggingface-cli login model.save_to_hub( "distilroberta-base-sentence-transformer", organization="embedding-data", train_datasets=["embedding-data/QQP_triplets"], exist_ok=True, )<jupyter_output><empty_output><jupyter_text>Extra: How to fine-tune a Sentence Transformer model Now we will fine-tune our Sentence Transformer model.<jupyter_code>modelB = SentenceTransformer('embedding-data/distilroberta-base-sentence-transformer') dataset_id = "embedding-data/sentence-compression" datasetB = load_dataset(dataset_id) print(f"Examples look like this: {datasetB['train']['set'][0]}") train_examplesB = [] train_dataB = datasetB['train']['set'] n_examples = datasetB['train'].num_rows for i in range(n_examples): example = train_dataB[i] train_examplesB.append(InputExample(texts=[example[0], example[1]])) train_dataloaderB = DataLoader(train_examplesB, shuffle=True, batch_size=64) train_lossB = losses.MultipleNegativesRankingLoss(model=modelB) num_epochsB = 10 warmup_stepsB = int(len(train_dataloaderB) * num_epochsB * 0.1) #10% of train data modelB.fit(train_objectives=[(train_dataloaderB, train_lossB)], epochs=num_epochsB, warmup_steps=warmup_stepsB) modelB.save_to_hub( "distilroberta-base-sentence-transformer", organization="embedding-data", train_datasets=["embedding-data/sentence-compression"], exist_ok=True, )<jupyter_output><empty_output>
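<jupyter_text>As a quick sanity check, you can embed a few sentences with the fine-tuned model and compare them with cosine similarity. This is a minimal sketch that assumes the `modelB` object from the previous cells is still in memory; the example sentences are arbitrary.<jupyter_code>from sentence_transformers import util sentences = ["A man is eating food.", "A man is eating a piece of bread.", "The girl is carrying a baby."] embeddings = modelB.encode(sentences, convert_to_tensor=True) # pairwise cosine similarity matrix between the three sentences print(util.cos_sim(embeddings, embeddings))<jupyter_output><empty_output>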
blog/notebooks/95_Training_Sentence_Transformers.ipynb/0
{ "file_path": "blog/notebooks/95_Training_Sentence_Transformers.ipynb", "repo_id": "blog", "token_count": 1836 }
41
--- title: 'Accelerated Inference with Optimum and Transformers Pipelines' thumbnail: /blog/assets/66_optimum_inference/thumbnail.png authors: - user: philschmid --- # Accelerated Inference with Optimum and Transformers Pipelines > Inference has landed in Optimum with support for Hugging Face Transformers pipelines, including text-generation using ONNX Runtime. The adoption of BERT and Transformers continues to grow. Transformer-based models are now not only achieving state-of-the-art performance in Natural Language Processing but also for Computer Vision, Speech, and Time-Series. 💬 🖼 🎤 ⏳ Companies are now moving from the experimentation and research phase to the production phase in order to use Transformer models for large-scale workloads. But by default BERT and its friends are relatively slow, big, and complex models compared to traditional Machine Learning algorithms. To solve this challenge, we created [Optimum](https://huggingface.co/blog/hardware-partners-program) – an extension of [Hugging Face Transformers](https://github.com/huggingface/transformers) to accelerate the training and inference of Transformer models like BERT. In this blog post, you'll learn: - [1. What is Optimum? An ELI5](#1-what-is-optimum-an-eli5) - [2. New Optimum inference and pipeline features](#2-new-optimum-inference-and-pipeline-features) - [3. End-to-End tutorial on accelerating RoBERTa for Question-Answering including quantization and optimization](#3-end-to-end-tutorial-on-accelerating-roberta-for-question-answering-including-quantization-and-optimization) - [4. Current Limitations](#4-current-limitations) - [5. Optimum Inference FAQ](#5-optimum-inference-faq) - [6. What’s next?](#6-whats-next) Let's get started! 🚀 ## 1. What is Optimum? An ELI5 [Hugging Face Optimum](https://github.com/huggingface/optimum) is an open-source library and an extension of [Hugging Face Transformers](https://github.com/huggingface/transformers), that provides a unified API of performance optimization tools to achieve maximum efficiency to train and run models on accelerated hardware, including toolkits for optimized performance on [Graphcore IPU](https://github.com/huggingface/optimum-graphcore) and [Habana Gaudi](https://github.com/huggingface/optimum-habana). Optimum can be used for accelerated training, quantization, graph optimization, and now inference as well with support for [transformers pipelines](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#pipelines). ## 2. New Optimum inference and pipeline features With [release](https://github.com/huggingface/optimum/releases/tag/v1.2.0) of Optimum 1.2, we are adding support for [inference](https://huggingface.co/docs/optimum/main/en/onnxruntime/modeling_ort) and [transformers pipelines](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#pipelines). This allows Optimum users to leverage the same API they are used to from transformers with the power of accelerated runtimes, like [ONNX Runtime](https://onnxruntime.ai/). **Switching from Transformers to Optimum Inference** The [Optimum Inference models](https://huggingface.co/docs/optimum/main/en/onnxruntime/modeling_ort) are API compatible with Hugging Face Transformers models. This means you can just replace your `AutoModelForXxx` class with the corresponding `ORTModelForXxx` class in Optimum. 
For example, this is how you can use a question answering model in Optimum: ```diff from transformers import AutoTokenizer, pipeline -from transformers import AutoModelForQuestionAnswering +from optimum.onnxruntime import ORTModelForQuestionAnswering -model = AutoModelForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2") # pytorch checkpoint +model = ORTModelForQuestionAnswering.from_pretrained("optimum/roberta-base-squad2") # onnx checkpoint tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2") optimum_qa = pipeline("question-answering", model=model, tokenizer=tokenizer) question = "What's my name?" context = "My name is Philipp and I live in Nuremberg." pred = optimum_qa(question, context) ``` In the first release, we added [support for ONNX Runtime](https://huggingface.co/docs/optimum/main/en/onnxruntime/modeling_ort) but there is more to come! These new `ORTModelForXX` can now be used with the [transformers pipelines](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#pipelines). They are also fully integrated into the [Hugging Face Hub](https://huggingface.co/models) to push and pull optimized checkpoints from the community. In addition to this, you can use the [ORTQuantizer](https://huggingface.co/docs/optimum/main/en/onnxruntime/quantization) and [ORTOptimizer](https://huggingface.co/docs/optimum/main/en/onnxruntime/optimization) to first quantize and optimize your model and then run inference on it. Check out [End-to-End Tutorial on accelerating RoBERTa for question-answering including quantization and optimization](#3-end-to-end-tutorial-on-accelerating-roberta-for-question-answering-including-quantization-and-optimization) for more details. ## 3. End-to-End tutorial on accelerating RoBERTa for Question-Answering including quantization and optimization In this End-to-End tutorial on accelerating RoBERTa for question-answering, you will learn how to: 1. Install `Optimum` for ONNX Runtime 2. Convert a Hugging Face `Transformers` model to ONNX for inference 3. Use the `ORTOptimizer` to optimize the model 4. Use the `ORTQuantizer` to apply dynamic quantization 5. Run accelerated inference using Transformers pipelines 6. Evaluate the performance and speed Let’s get started 🚀 *This tutorial was created and run on an `m5.xlarge` AWS EC2 Instance.* ### 3.1 Install `Optimum` for Onnxruntime Our first step is to install `Optimum` with the `onnxruntime` utilities. ```bash pip install "optimum[onnxruntime]==1.2.0" ``` This will install all required packages for us including `transformers`, `torch`, and `onnxruntime`. If you are going to use a GPU you can install optimum with `pip install optimum[onnxruntime-gpu]`. ### 3.2 Convert a Hugging Face `Transformers` model to ONNX for inference** Before we can start optimizing we need to convert our vanilla `transformers` model to the `onnx` format. To do this we will use the new [ORTModelForQuestionAnswering](https://huggingface.co/docs/optimum/main/en/onnxruntime/modeling_ort#optimum.onnxruntime.ORTModelForQuestionAnswering) class calling the `from_pretrained()` method with the `from_transformers` attribute. The model we are using is the [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2) a fine-tuned RoBERTa model on the SQUAD2 dataset achieving an F1 score of `82.91` and as the feature (task) `question-answering`. 
```python
from pathlib import Path
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForQuestionAnswering

model_id = "deepset/roberta-base-squad2"
onnx_path = Path("onnx")
task = "question-answering"

# load vanilla transformers and convert to onnx
model = ORTModelForQuestionAnswering.from_pretrained(model_id, from_transformers=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# save onnx checkpoint and tokenizer
model.save_pretrained(onnx_path)
tokenizer.save_pretrained(onnx_path)

# test the model using the transformers pipeline, with handle_impossible_answer for squad_v2
optimum_qa = pipeline(task, model=model, tokenizer=tokenizer, handle_impossible_answer=True)
prediction = optimum_qa(question="What's my name?", context="My name is Philipp and I live in Nuremberg.")
print(prediction)
# {'score': 0.9041663408279419, 'start': 11, 'end': 18, 'answer': 'Philipp'}
```

We successfully converted our vanilla transformers model to `onnx` and used it with `transformers.pipelines` to run the first prediction. Now let's optimize it. 🏎

If you want to learn more about exporting transformers models, check out the documentation: [Export 🤗 Transformers Models](https://huggingface.co/docs/transformers/main/en/serialization)

### 3.3 Use the `ORTOptimizer` to optimize the model

After we saved our onnx checkpoint to `onnx/` we can now use the `ORTOptimizer` to apply graph optimizations such as operator fusion and constant folding to accelerate latency and inference.

```python
from optimum.onnxruntime import ORTOptimizer
from optimum.onnxruntime.configuration import OptimizationConfig

# create ORTOptimizer and define optimization configuration
optimizer = ORTOptimizer.from_pretrained(model_id, feature=task)
optimization_config = OptimizationConfig(optimization_level=99) # enable all optimizations

# apply the optimization configuration to the model
optimizer.export(
    onnx_model_path=onnx_path / "model.onnx",
    onnx_optimized_model_output_path=onnx_path / "model-optimized.onnx",
    optimization_config=optimization_config,
)
```

To test performance we can use the `ORTModelForQuestionAnswering` class again and provide an additional `file_name` parameter to load our optimized model. **(This also works for models available on the hub).**

```python
from optimum.onnxruntime import ORTModelForQuestionAnswering

# load optimized model
opt_model = ORTModelForQuestionAnswering.from_pretrained(onnx_path, file_name="model-optimized.onnx")

# test the optimized model using the transformers pipeline
opt_optimum_qa = pipeline(task, model=opt_model, tokenizer=tokenizer, handle_impossible_answer=True)
prediction = opt_optimum_qa(question="What's my name?", context="My name is Philipp and I live in Nuremberg.")
print(prediction)
# {'score': 0.9041663408279419, 'start': 11, 'end': 18, 'answer': 'Philipp'}
```

We will evaluate the performance changes in step [3.6 Evaluate the performance and speed](#36-evaluate-the-performance-and-speed) in detail.

### 3.4 Use the `ORTQuantizer` to apply dynamic quantization

After we have optimized our model we can accelerate it even more by quantizing it using the `ORTQuantizer`. The `ORTQuantizer` can be used to apply dynamic quantization to decrease the model size and accelerate latency and inference.
*We use the `avx512_vnni` since the instance is powered by an intel cascade-lake CPU supporting avx512.* ```python from optimum.onnxruntime import ORTQuantizer from optimum.onnxruntime.configuration import AutoQuantizationConfig # create ORTQuantizer and define quantization configuration quantizer = ORTQuantizer.from_pretrained(model_id, feature=task) qconfig = AutoQuantizationConfig.avx512_vnni(is_static=False, per_channel=True) # apply the quantization configuration to the model quantizer.export( onnx_model_path=onnx_path / "model-optimized.onnx", onnx_quantized_model_output_path=onnx_path / "model-quantized.onnx", quantization_config=qconfig, ) ``` We can now compare this model size as well as some latency performance ```python import os # get model file size size = os.path.getsize(onnx_path / "model.onnx")/(1024*1024) print(f"Vanilla Onnx Model file size: {size:.2f} MB") size = os.path.getsize(onnx_path / "model-quantized.onnx")/(1024*1024) print(f"Quantized Onnx Model file size: {size:.2f} MB") # Vanilla Onnx Model file size: 473.31 MB # Quantized Onnx Model file size: 291.77 MB ``` <figure class="image table text-center m-0 w-full"> <img src="assets/66_optimum_inference/model_size.png" alt="Model size comparison"/> </figure> We decreased the size of our model by almost 50% from 473MB to 291MB. To run inference we can use the `ORTModelForQuestionAnswering` class again and provide an additional `file_name` parameter to load our quantized model. **(This also works for models available on the hub).** ```python # load quantized model quantized_model = ORTModelForQuestionAnswering.from_pretrained(onnx_path, file_name="model-quantized.onnx") # test the quantized model with using transformers pipeline quantized_optimum_qa = pipeline(task, model=quantized_model, tokenizer=tokenizer, handle_impossible_answer=True) prediction = quantized_optimum_qa(question="What's my name?", context="My name is Philipp and I live in Nuremberg.") print(prediction) # {'score': 0.9246969819068909, 'start': 11, 'end': 18, 'answer': 'Philipp'} ``` Nice! The model predicted the same answer. ### 3.5 Run accelerated inference using Transformers pipelines [Optimum](https://huggingface.co/docs/optimum/main/en/pipelines#optimizing-with-ortoptimizer) has built-in support for [transformers pipelines](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#pipelines). This allows us to leverage the same API that we know from using PyTorch and TensorFlow models. We have already used this feature in steps 3.2,3.3 & 3.4 to test our converted and optimized models. At the time of writing this, we are supporting [ONNX Runtime](https://onnxruntime.ai/) with more to come in the future. An example of how to use the [transformers pipelines](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#pipelines) can be found below. ```python from transformers import AutoTokenizer, pipeline from optimum.onnxruntime import ORTModelForQuestionAnswering tokenizer = AutoTokenizer.from_pretrained(onnx_path) model = ORTModelForQuestionAnswering.from_pretrained(onnx_path) optimum_qa = pipeline("question-answering", model=model, tokenizer=tokenizer) prediction = optimum_qa(question="What's my name?", context="My name is Philipp and I live in Nuremberg.") print(prediction) # {'score': 0.9041663408279419, 'start': 11, 'end': 18, 'answer': 'Philipp'} ``` In addition to this we added a `pipelines` API to Optimum to guarantee more safety for your accelerated models. 
Meaning if you are trying to use `optimum.pipelines` with an unsupported model or task you will see an error. You can use `optimum.pipelines` as a replacement for `transformers.pipelines`. ```python from transformers import AutoTokenizer from optimum.onnxruntime import ORTModelForQuestionAnswering from optimum.pipelines import pipeline tokenizer = AutoTokenizer.from_pretrained(onnx_path) model = ORTModelForQuestionAnswering.from_pretrained(onnx_path) optimum_qa = pipeline("question-answering", model=model, tokenizer=tokenizer, handle_impossible_answer=True) prediction = optimum_qa(question="What's my name?", context="My name is Philipp and I live in Nuremberg.") print(prediction) # {'score': 0.9041663408279419, 'start': 11, 'end': 18, 'answer': 'Philipp'} ``` ### 3.6 Evaluate the performance and speed During this [End-to-End tutorial on accelerating RoBERTa for Question-Answering including quantization and optimization](#3-end-to-end-tutorial-on-accelerating-roberta-for-question-answering-including-quantization-and-optimization), we created 3 different models. A vanilla converted model, an optimized model, and a quantized model. As the last step of the tutorial, we want to take a detailed look at the performance and accuracy of our model. Applying optimization techniques, like graph optimizations or quantization not only impact performance (latency) those also might have an impact on the accuracy of the model. So accelerating your model comes with a trade-off. Let's evaluate our models. Our transformers model [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2) was fine-tuned on the SQUAD2 dataset. This will be the dataset we use to evaluate our models. ```python from datasets import load_metric,load_dataset metric = load_metric("squad_v2") dataset = load_dataset("squad_v2")["validation"] print(f"length of dataset {len(dataset)}") #length of dataset 11873 ``` We can now leverage the [map](https://huggingface.co/docs/datasets/v2.1.0/en/process#map) function of [datasets](https://huggingface.co/docs/datasets/index) to iterate over the validation set of squad 2 and run prediction for each data point. 
Therefore we write a `evaluate` helper method which uses our pipelines and applies some transformation to work with the [squad v2 metric.](https://huggingface.co/metrics/squad_v2) *This can take quite a while (1.5h)* ```python def evaluate(example): default = optimum_qa(question=example["question"], context=example["context"]) optimized = opt_optimum_qa(question=example["question"], context=example["context"]) quantized = quantized_optimum_qa(question=example["question"], context=example["context"]) return { 'reference': {'id': example['id'], 'answers': example['answers']}, 'default': {'id': example['id'],'prediction_text': default['answer'], 'no_answer_probability': 0.}, 'optimized': {'id': example['id'],'prediction_text': optimized['answer'], 'no_answer_probability': 0.}, 'quantized': {'id': example['id'],'prediction_text': quantized['answer'], 'no_answer_probability': 0.}, } result = dataset.map(evaluate) # COMMENT IN to run evaluation on 2000 subset of the dataset # result = dataset.shuffle().select(range(2000)).map(evaluate) ``` Now lets compare the results ```python default_acc = metric.compute(predictions=result["default"], references=result["reference"]) optimized = metric.compute(predictions=result["optimized"], references=result["reference"]) quantized = metric.compute(predictions=result["quantized"], references=result["reference"]) print(f"vanilla model: exact={default_acc['exact']}% f1={default_acc['f1']}%") print(f"optimized model: exact={optimized['exact']}% f1={optimized['f1']}%") print(f"quantized model: exact={quantized['exact']}% f1={quantized['f1']}%") # vanilla model: exact=79.07858165585783% f1=82.14970024570314% # optimized model: exact=79.07858165585783% f1=82.14970024570314% # quantized model: exact=78.75010528088941% f1=81.82526107204629% ``` Our optimized & quantized model achieved an exact match of `78.75%` and an f1 score of `81.83%` which is `99.61%` of the original accuracy. Achieving `99%` of the original model is very good especially since we used dynamic quantization. Okay, let's test the performance (latency) of our optimized and quantized model. But first, let’s extend our context and question to a more meaningful sequence length of 128. ```python context="Hello, my name is Philipp and I live in Nuremberg, Germany. Currently I am working as a Technical Lead at Hugging Face to democratize artificial intelligence through open source and open science. In the past I designed and implemented cloud-native machine learning architectures for fin-tech and insurance companies. I found my passion for cloud concepts and machine learning 5 years ago. Since then I never stopped learning. Currently, I am focusing myself in the area NLP and how to leverage models like BERT, Roberta, T5, ViT, and GPT2 to generate business value." question="As what is Philipp working?" ``` To keep it simple, we are going to use a python loop and calculate the avg/mean latency for our vanilla model and for the optimized and quantized model. 
```python from time import perf_counter import numpy as np def measure_latency(pipe): latencies = [] # warm up for _ in range(10): _ = pipe(question=question, context=context) # Timed run for _ in range(100): start_time = perf_counter() _ = pipe(question=question, context=context) latency = perf_counter() - start_time latencies.append(latency) # Compute run statistics time_avg_ms = 1000 * np.mean(latencies) time_std_ms = 1000 * np.std(latencies) return f"Average latency (ms) - {time_avg_ms:.2f} +\- {time_std_ms:.2f}" print(f"Vanilla model {measure_latency(optimum_qa)}") print(f"Optimized & Quantized model {measure_latency(quantized_optimum_qa)}") # Vanilla model Average latency (ms) - 117.61 +\- 8.48 # Optimized & Quantized model Average latency (ms) - 64.94 +\- 3.65 ``` <figure class="image table text-center m-0 w-full"> <img src="assets/66_optimum_inference/results.png" alt="Latency & F1 results"/> </figure> We managed to accelerate our model latency from `117.61ms` to `64.94ms` or roughly 2x while keeping `99.61%` of the accuracy. Something we should keep in mind is that we used a mid-performant CPU instance with 2 physical cores. By switching to GPU or a more performant CPU instance, e.g. [ice-lake powered you can decrease the latency number down to a few milliseconds.](https://huggingface.co/blog/bert-cpu-scaling-part-2#more-efficient-ai-processing-on-latest-intel-ice-lake-cpus) ## 4. Current Limitations We just started supporting inference in [https://github.com/huggingface/optimum](https://github.com/huggingface/optimum) so we would like to share current limitations as well. All of those limitations are on the roadmap and will be resolved in the near future. - **Remote Models > 2GB:** Currently, only models smaller than 2GB can be loaded from the [Hugging Face Hub](https://hf.co/). We are working on adding support for models > 2GB / multi-file models. - **Seq2Seq tasks/model:** We don’t have support for seq2seq tasks, like summarization and models like T5 mostly due to the limitation of the single model support. But we are actively working to solve it, to provide you with the same experience you are familiar with in transformers. - **Past key values:** Generation models like GPT-2 use something called past key values which are precomputed key-value pairs of the attention blocks and can be used to speed up decoding. Currently the ORTModelForCausalLM is not using past key values. - **No cache:** Currently when loading an optimized model (*.onnx), it will not be cached locally. ## 5. Optimum Inference FAQ **Which tasks are supported?** You can find a list of all supported tasks in the [documentation](https://huggingface.co/docs/optimum/main/en/pipelines). Currently support pipelines tasks are `feature-extraction`, `text-classification`, `token-classification`, `question-answering`, `zero-shot-classification`, `text-generation` **Which models are supported?** Any model that can be exported with [transformers.onnx](https://huggingface.co/docs/transformers/serialization) and has a supported task can be used, this includes among others BERT, ALBERT, GPT2, RoBERTa, XLM-RoBERTa, DistilBERT .... **Which runtimes are supported?** Currently, ONNX Runtime is supported. We are working on adding more in the future. [Let us know](https://discuss.huggingface.co/c/optimum/59) if you are interested in a specific runtime. 
**How can I use Optimum with Transformers?** You can find an example and instructions in our [documentation](https://huggingface.co/docs/optimum/main/en/pipelines#transformers-pipeline-usage). **How can I use GPUs?** To be able to use GPUs you simply need to install `optimum[onnxruntine-gpu]` which will install the required GPU providers and use them by default. **How can I use a quantized and optimized model with pipelines?** You can load the optimized or quantized model using the new [ORTModelForXXX](https://huggingface.co/docs/optimum/main/en/onnxruntime/modeling_ort) classes using the [from_pretrained](https://huggingface.co/docs/optimum/main/en/onnxruntime/modeling_ort#optimum.onnxruntime.ORTModelForQuestionAnswering.forward.example) method. You can learn more about it in our [documentation](https://huggingface.co/docs/optimum/main/en/onnxruntime/modeling_ort#optimum-inference-with-onnx-runtime). ## 6. What’s next? What’s next for Optimum you ask? A lot of things. We are focused on making Optimum the reference open-source toolkit to work with transformers for acceleration & optimization. To be able to achieve this we will solve the current limitations, improve the documentation, create more content and examples and push the limits for accelerating and optimizing transformers. Some important features on the roadmap for Optimum amongst the [current limitations](#4-current-limitations) are: - Support for speech models (Wav2vec2) and speech tasks (automatic speech recognition) - Support for vision models (ViT) and vision tasks (image classification) - Improve performance by adding support for [OrtValue](https://onnxruntime.ai/docs/api/python/api_summary.html#ortvalue) and [IOBinding](https://onnxruntime.ai/docs/api/python/api_summary.html#iobinding) - Easier ways to evaluate accelerated models - Add support for other runtimes and providers like TensorRT and AWS-Neuron --- Thanks for reading! If you are as excited as I am about accelerating Transformers, make them efficient and scale them to billions of requests. You should apply, [we are hiring](https://apply.workable.com/huggingface/#jobs).🚀 If you have any questions, feel free to contact me, through [Github](https://github.com/huggingface/optimum/issues), or on the [forum](https://discuss.huggingface.co/c/optimum/59). You can also connect with me on [Twitter](https://twitter.com/_philschmid) or [LinkedIn](https://www.linkedin.com/in/philipp-schmid-a6a2bb196/).
blog/optimum-inference.md/0
{ "file_path": "blog/optimum-inference.md", "repo_id": "blog", "token_count": 7406 }
42
--- title: "A Chatbot on your Laptop: Phi-2 on Intel Meteor Lake" thumbnail: /blog/assets/phi2-intel-meteor-lake/02.jpg authors: - user: juliensimon - user: echarlaix - user: ofirzaf guest: true - user: imargulis guest: true - user: guybd guest: true - user: moshew guest: true --- # A Chatbot on your Laptop: Phi-2 on Intel Meteor Lake <p align="center"> <img src="assets/phi2-intel-meteor-lake/02.jpg" alt="David vs. Goliath revisited" width="512"><br> </p> Because of their impressive abilities, large language models (LLMs) require significant computing power, which is seldom available on personal computers. Consequently, we have no choice but to deploy them on powerful bespoke AI servers hosted on-premises or in the cloud. ## Why local LLM inference is desirable What if we could run state-of-the-art open-source LLMs on a typical personal computer? Wouldn't we enjoy benefits like: * **Increased privacy**: our data would not be sent to an external API for inference. * **Lower latency**: we would save network round trips. * **Offline work**: we could work without network connectivity (a frequent flyer's dream!). * **Lower cost**: we wouldn't spend any money on API calls or model hosting. * **Customizability**: each user could find the models that best fit the tasks they work on daily, and they could even fine-tune them or use local Retrieval-Augmented Generation (RAG) to increase relevance. This all sounds very exciting indeed. So why aren't we doing it already? Returning to our opening statement, your typical reasonably priced laptop doesn't pack enough compute punch to run LLMs with acceptable performance. There is no multi-thousand-core GPU and no lightning-fast High Memory Bandwidth in sight. A lost cause, then? Of course not. ## Why local LLM inference is now possible There's nothing that the human mind can't make smaller, faster, more elegant, and more cost-effective. In recent months, the AI community has worked hard to shrink models without compromising their predictive quality. Three areas are exciting: * **Hardware acceleration**: modern CPU architectures embed hardware dedicated to accelerating the most common deep learning operators, such as matrix multiplication or convolution, enabling new Generative AI applications on AI PCs and significantly improving their speed and efficiency. * **Small Language Models (SLMs)**: thanks to innovative architectures and training techniques, these models are on par or even better than larger models. Because they have fewer parameters, inference requires less computing and memory, making them excellent candidates for resource-constrained environments. * **Quantization**: Quantization is a process that lowers memory and computing requirements by reducing the bit width of model weights and activations, for example, from 16-bit floating point (`fp16`) to 8-bit integers (`int8`). Reducing the number of bits means that the resulting model requires less memory at inference time, speeding up latency for memory-bound steps like the decoding phase when text is generated. In addition, operations like matrix multiplication can be performed faster thanks to integer arithmetic when quantizing both the weights and activations. In this post, we'll leverage all of the above. Starting from the Microsoft [Phi-2](https://huggingface.co/microsoft/phi-2) model, we will apply 4-bit quantization on the model weights, thanks to the Intel OpenVINO integration in our [Optimum Intel](https://github.com/huggingface/optimum-intel) library. 
Then, we will run inference on a mid-range laptop powered by an Intel Meteor Lake CPU. > **_NOTE_**: If you're interested in applying quantization on both weights and activations, you can find more information in our [documentation](https://huggingface.co/docs/optimum/main/en/intel/optimization_ov#static-quantization). Let's get to work. ## Intel Meteor Lake Launched in December 2023, Intel Meteor Lake, now renamed to [Core Ultra](https://www.intel.com/content/www/us/en/products/details/processors/core-ultra.html), is a new [architecture](https://www.intel.com/content/www/us/en/content-details/788851/meteor-lake-architecture-overview.html) optimized for high-performance laptops. The first Intel client processor to use a chiplet architecture, Meteor Lake includes: * A **power-efficient CPU** with up to 16 cores, * An **integrated GPU (iGPU)** with up to 8 Xe cores, each featuring 16 Xe Vector Engines (XVE). As the name implies, an XVE can perform vector operations on 256-bit vectors. It also implements the DP4a instruction, which computes a dot product between two vectors of 4-byte values, stores the result in a 32-bit integer, and adds it to a third 32-bit integer. * A **Neural Processing Unit (NPU)**, a first for Intel architectures. The NPU is a dedicated AI engine built for efficient client AI. It is optimized to handle demanding AI computations efficiently, freeing up the main CPU and graphics for other tasks. Compared to using the CPU or the iGPU for AI tasks, the NPU is designed to be more power-efficient. To run the demo below, we selected a [mid-range laptop](https://www.amazon.com/MSI-Prestige-Evo-Laptop-A1MG-029US/dp/B0CP9Y8Q6T/) powered by a [Core Ultra 7 155H CPU](https://www.intel.com/content/www/us/en/products/sku/236847/intel-core-ultra-7-processor-155h-24m-cache-up-to-4-80-ghz/specifications.html). Now, let's pick a lovely small language model to run on this laptop. > **_NOTE_**: To run this code on Linux, install your GPU driver by following [these instructions](https://docs.openvino.ai/2024/get-started/configurations/configurations-intel-gpu.html). ## The Microsoft Phi-2 model [Released](https://www.microsoft.com/en-us/research/blog/phi-2-the-surprising-power-of-small-language-models/) in December 2023, [Phi-2](https://huggingface.co/microsoft/phi-2) is a 2.7-billion parameter model trained for text generation. On reported benchmarks, unfazed by its smaller size, Phi-2 outperforms some of the best 7-billion and 13-billion LLMs and even stays within striking distance of the much larger Llama-2 70B model. <kbd> <img src="assets/phi2-intel-meteor-lake/01.png"> </kbd> This makes it an exciting candidate for laptop inference. Curious readers may also want to experiment with the 1.1-billion [TinyLlama](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) model. Now, let's see how we can shrink the model to make it smaller and faster. ## Quantization with Intel OpenVINO and Optimum Intel Intel OpenVINO is an open-source toolkit for optimizing AI inference on many Intel hardware platforms ([Github](https://github.com/openvinotoolkit/openvino), [documentation](https://docs.openvino.ai/2024/home.html)), notably through model quantization. Partnering with Intel, we have integrated OpenVINO in Optimum Intel, our open-source library dedicated to accelerating Hugging Face models on Intel platforms ([Github](https://github.com/huggingface/optimum-intel), [documentation](https://huggingface.co/docs/optimum/intel/index)). 
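Before diving into the tooling, it helps to see what weight quantization does numerically. The snippet below is only a toy illustration of symmetric 8-bit weight quantization in plain NumPy; it is not what OpenVINO or NNCF do internally (they use more sophisticated schemes, including the grouped 4-bit quantization we apply later), and the matrix is a random stand-in for a real weight tensor.

```python
import numpy as np

# toy fp32 weight matrix standing in for one layer of a model
w = np.random.randn(4, 8).astype(np.float32)

# symmetric int8 quantization with one scale per output row
scale = np.abs(w).max(axis=1, keepdims=True) / 127.0
w_int8 = np.clip(np.round(w / scale), -127, 127).astype(np.int8)

# at inference time the weights are rescaled back (or used directly by integer kernels)
w_dequant = w_int8.astype(np.float32) * scale

print("max absolute error:", np.abs(w - w_dequant).max())
print("fp32 bytes:", w.nbytes, "-> int8 bytes:", w_int8.nbytes)
```

The storage cost drops by roughly 4x (ignoring the small overhead of the scales) while the reconstruction error stays small, which is the trade-off the rest of this post exploits at a much larger scale.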
First make sure you have the latest version of `optimum-intel` with all the necessary libraries installed: ```bash pip install --upgrade-strategy eager optimum[openvino,nncf] ``` This integration makes quantizing Phi-2 to 4-bit straightforward. We define a quantization configuration, set the optimization parameters, and load the model from the hub. Once it has been quantized and optimized, we store it locally. ```python from transformers import AutoTokenizer, pipeline from optimum.intel import OVModelForCausalLM, OVWeightQuantizationConfig model_id = "microsoft/phi-2" device = "gpu" # Create the quantization configuration with desired quantization parameters q_config = OVWeightQuantizationConfig(bits=4, group_size=128, ratio=0.8) # Create OpenVINO configuration with optimal settings for this model ov_config = {"PERFORMANCE_HINT": "LATENCY", "CACHE_DIR": "model_cache", "INFERENCE_PRECISION_HINT": "f32"} tokenizer = AutoTokenizer.from_pretrained(model_id) model = OVModelForCausalLM.from_pretrained( model_id, export=True, # export model to OpenVINO format: should be False if model already exported quantization_config=q_config, device=device, ov_config=ov_config, ) # Compilation step : if not explicitly called, compilation will happen before the first inference model.compile() pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) results = pipe("He's a dreadful magician and") save_directory = "phi-2-openvino" model.save_pretrained(save_directory) tokenizer.save_pretrained(save_directory) ``` The `ratio` parameter controls the fraction of weights we'll quantize to 4-bit (here, 80%) and the rest to 8-bit. The `group_size` parameter defines the size of the weight quantization groups (here, 128), each group having its scaling factor. Decreasing these two values usually improves accuracy at the expense of model size and inference latency. You can find more information on weight quantization in our [documentation](https://huggingface.co/docs/optimum/main/en/intel/optimization_ov#weight-only-quantization). > **_NOTE_**: the entire notebook with text generation examples is [available on Github](https://github.com/huggingface/optimum-intel/blob/main/notebooks/openvino/quantized_generation_demo.ipynb). So, how fast is the quantized model on our laptop? Watch the following videos to see for yourself. Remember to select the 1080p resolution for maximum sharpness. The first video asks our model a high-school physics question: "*Lily has a rubber ball that she drops from the top of a wall. The wall is 2 meters tall. How long will it take for the ball to reach the ground?*" <iframe width="100%" style="aspect-ratio: 16 / 9;"src="https://www.youtube.com/embed/nTNYRDORq14" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> The second video asks our model a coding question: "*Write a class which implements a fully connected layer with forward and backward functions using numpy. Use markdown markers for code.*" <iframe width="100%" style="aspect-ratio: 16 / 9;"src="https://www.youtube.com/embed/igWrp8gnJZg" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> As you can see in both examples, the generated answer is very high quality. The quantization process hasn't degraded the high quality of Phi-2, and the generation speed is adequate. 
I would be happy to work locally with this model daily. ## Conclusion Thanks to Hugging Face and Intel, you can now run LLMs on your laptop, enjoying the many benefits of local inference, like privacy, low latency, and low cost. We hope to see more quality models optimized for the Meteor Lake platform and its successor, Lunar Lake. The Optimum Intel library makes it very easy to quantize models for Intel platforms, so why not give it a try and share your excellent models on the Hugging Face Hub? We can always use more! Here are some resources to help you get started: * Optimum Intel [documentation](https://huggingface.co/docs/optimum/main/en/intel/inference) * [Developer resources](https://www.intel.com/content/www/us/en/developer/partner/hugging-face.html) from Intel and Hugging Face. * A video deep dive on model quantization: [part 1](https://youtu.be/kw7S-3s50uk), [part 2](https://youtu.be/fXBBwCIA0Ds) If you have questions or feedback, we'd love to answer them on the [Hugging Face forum](https://discuss.huggingface.co/). Thanks for reading!
blog/phi2-intel-meteor-lake.md/0
{ "file_path": "blog/phi2-intel-meteor-lake.md", "repo_id": "blog", "token_count": 3171 }
43
--- title: "Retrieval Augmented Generation with Huggingface Transformers and Ray" thumbnail: /blog/assets/12_ray_rag/ray_arch_updated.png authors: - user: ray-project guest: true --- # Retrieval Augmented Generation with Huggingface Transformers and Ray ##### A guest blog post by <a href="/amogkam">Amog Kamsetty</a> from the Anyscale team [Huggingface Transformers](https://huggingface.co/) recently added the [Retrieval Augmented Generation (RAG)](https://twitter.com/huggingface/status/1310597560906780680) model, a new NLP architecture that leverages external documents (like Wikipedia) to augment its knowledge and achieve state of the art results on knowledge-intensive tasks. In this blog post, we introduce the integration of [Ray](https://docs.ray.io/en/master/), a library for building scalable applications, into the RAG contextual document retrieval mechanism. This speeds up retrieval calls by 2x and improves the scalability of RAG distributed [fine-tuning](https://github.com/huggingface/transformers/tree/master/examples/research_projects/rag). ### What is Retrieval Augmented Generation (RAG)? ![alt_text](assets/12_ray_rag/rag_gif.gif "image_tooltip") _An overview of RAG. The model retrieves contextual documents from an external dataset as part of its execution. These contextual documents are used in conjunction with the original input to produce an output. The GIF is taken from [Facebook's original blog post](https://ai.facebook.com/blog/retrieval-augmented-generation-streamlining-the-creation-of-intelligent-natural-language-processing-models)._ Recently, [Huggingface](https://huggingface.co/) partnered with [Facebook AI](https://ai.facebook.com/) to introduce the [RAG](https://twitter.com/huggingface/status/1310597560906780680) model as part of its Transformers library. [RAG](https://ai.facebook.com/blog/retrieval-augmented-generation-streamlining-the-creation-of-intelligent-natural-language-processing-models/) acts just like any other [seq2seq model](https://blog.keras.io/a-ten-minute-introduction-to-sequence-to-sequence-learning-in-keras.html). However, [RAG](https://ai.facebook.com/blog/retrieval-augmented-generation-streamlining-the-creation-of-intelligent-natural-language-processing-models/) has an intermediate component that retrieves contextual documents from an external knowledge base (like a Wikipedia text corpus). These documents are then used in conjunction with the input sequence and passed into the underlying seq2seq [generator](https://huggingface.co/blog/how-to-generate). This information retrieval step allows [RAG](https://ai.facebook.com/blog/retrieval-augmented-generation-streamlining-the-creation-of-intelligent-natural-language-processing-models/) to make use of multiple sources of knowledge -- those that are baked into the model parameters and the information that is contained in the contextual passages, allowing it to outperform other state-of-the-art models in tasks like question answering. You can try it for yourself using this [demo provided by Huggingface](https://huggingface.co/rag/)! ### Scaling up fine-tuning This retrieval of contextual documents is crucial for RAG's state-of-the-art results but introduces an extra layer of complexity. When scaling up the training process via a data-parallel training routine, a naive implementation of the document lookup can become a bottleneck for training. 
Further, the **document index** used in the retrieval component is often quite large, making it infeasible for each training worker to load its own replicated copy of the index. The previous implementation of RAG fine-tuning leveraged the [torch.distributed](https://pytorch.org/docs/stable/distributed.html) communication package for the document retrieval portion. However, this implementation sometimes proved to be inflexible and limited in scalability. Instead, a framework-agnostic and a more flexible implementation for ad-hoc concurrent programming is required. [Ray](https://ray.io/) fits the bill perfectly. Ray is a simple, yet powerful Python library for general-purpose distributed and parallel programming. Using Ray for distributed document retrieval, we achieved a **2x speedup per retrieval call compared to `torch.distributed`**, and overall better fine-tuning scalability. ### Ray for Document Retrieval ![alt_text](assets/12_ray_rag/torch_distributed_document_retrieval.png "image_tooltip") _Document retrieval with the torch.distributed implementation_ The main drawback of the [torch.distributed](https://pytorch.org/docs/stable/distributed.html) implementation for document retrieval was that it latched onto the same process group used for training and only the rank 0 training worker loaded the index into memory. As a result, this implementation had some limitations: 1. **Synchronization bottleneck**: The rank 0 worker had to receive the inputs from all workers, perform the index query, and then send the results back to the other workers. This limited performance with multiple training workers. 2. **PyTorch specific**: The document retrieval process group had to latch onto the existing process group used for training, meaning that PyTorch had to be used for training as well. ![alt_text](assets/12_ray_rag/ray_arch_updated.png "image_tooltip") _Document retrieval with the Ray implementation_ To overcome these limitations, we introduced a novel implementation of distributed retrieval based on Ray. With [Ray’s stateful actor abstractions](https://docs.ray.io/en/master/actors.html), multiple processes that are separate from the training processes are used to load the index and handle the retrieval queries. With multiple Ray actors, retrieval is no longer a bottleneck and PyTorch is no longer a requirement for RAG. And as you can see below, using the [Ray](https://docs.ray.io/en/master/) based implementation leads to better retrieval performance for multi-GPU fine-tuning. The following results show the seconds per retrieval call and we can see that as we increase the number of GPUs that we train on, using Ray has comparatively better performance than `torch.distributed`. Also, if we increase the number of Ray processes that perform retrieval, we also get better performance with more training workers since a single retrieval process is no longer a bottleneck. <table> <tr> <td> </td> <td>2 GPU </td> <td>3 GPU </td> <td>4 GPU </td> </tr> <tr> <td>torch.distributed </td> <td>2.12 sec/retrieval </td> <td>2.62 sec/retrieve </td> <td>3.438 sec/retrieve </td> </tr> <tr> <td>Ray 2 retrieval processes </td> <td>1.49 sec/retrieve </td> <td>1.539 sec/retrieve </td> <td>2.029 sec/retrieve </td> </tr> <tr> <td>Ray 4 retrieval processes </td> <td>1.145 sec/retrieve </td> <td>1.484 sec/retrieve </td> <td>1.66 sec/retrieve </td> </tr> </table> _A performance comparison of different retrieval implementations. 
For each document retrieval implementation, we run 500 training steps with a per-GPU batch size of 8, and measure the time it takes to retrieve the contextual documents for each batch on the rank 0 training worker. As the results show, using multiple retrieval processes improves performance, especially as we scale training to multiple GPUs._ ### How do I use it? [Huggingface](https://huggingface.co/) provides a [PyTorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning) based [fine tuning script](https://github.com/huggingface/transformers/tree/master/examples/research_projects/rag), and we extended it to add the Ray retrieval implementation as an option. To try it out, first install the necessary requirements ```bash pip install ray pip install transformers pip install -r transformers/examples/research_projects/rag/requirements.txt ``` Then, you can specify your data paths and other configurations and run [finetune-rag-ray.sh](https://github.com/huggingface/transformers/blob/master/examples/research_projects/rag/finetune_rag_ray.sh)! ```bash # Sample script to finetune RAG using Ray for distributed retrieval. # Add parent directory to python path to access lightning_base.py export PYTHONPATH="../":"${PYTHONPATH}" # Start a single-node Ray cluster. ray start --head # A sample finetuning run, you need to specify data_dir, output_dir and model_name_or_path # run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options python examples/rag/finetune_rag.py \ --data_dir $DATA_DIR \ --output_dir $OUTPUT_DIR \ --model_name_or_path $MODEL_NAME_OR_PATH \ --model_type rag_sequence \ --fp16 \ --gpus 8 \ --profile \ --do_train \ --do_predict \ --n_val -1 \ --train_batch_size 8 \ --eval_batch_size 1 \ --max_source_length 128 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-05 \ --num_train_epochs 100 \ --warmup_steps 500 \ --gradient_accumulation_steps 1 \ --distributed_retriever ray \ --num_retrieval_workers 4 # Stop the Ray cluster. ray stop ``` ## What’s next? Using RAG with [Huggingface transformers](https://github.com/huggingface/transformers/tree/master/examples/research_projects/rag) and the [Ray retrieval implementation](https://github.com/huggingface/transformers/blob/master/examples/research_projects/rag/finetune_rag_ray.sh) for faster distributed fine-tuning, you can leverage RAG for retrieval-based generation on your own knowledge-intensive tasks. Also, hyperparameter tuning is another aspect of transformer fine tuning and can have [huge impacts on accuracy](https://medium.com/distributed-computing-with-ray/hyperparameter-optimization-for-transformers-a-guide-c4e32c6c989b). For scalable and easy hyperparameter tuning, check out the [Ray Tune](https://docs.ray.io/en/latest/tune/) library. By using [Ray Tune’s integration with PyTorch Lightning](https://medium.com/distributed-computing-with-ray/scaling-up-pytorch-lightning-hyperparameter-tuning-with-ray-tune-4bd9e1ff9929), or the [built-in integration with Huggingface transformers](https://huggingface.co/blog/ray-tune), you can run experiments to find the perfect hyperparameters for your RAG model. 
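To give a flavour of what such an experiment looks like, here is a small, hypothetical Ray Tune sketch. It is not the RAG fine-tuning integration itself — the trainable body, metric name, and search space below are placeholders chosen purely for illustration: ```python from ray import tune def train_fn(config): # Hypothetical trainable: in a real setup this would launch a RAG fine-tuning # run with the sampled hyperparameters and report the validation loss per epoch. for epoch in range(5): fake_val_loss = 1.0 / (epoch + 1) + config["learning_rate"] tune.report(val_loss=fake_val_loss) analysis = tune.run( train_fn, config={ "learning_rate": tune.loguniform(1e-6, 1e-4), # log-uniform search over learning rates "warmup_steps": tune.choice([250, 500, 1000]), # discrete choice of warmup schedules }, num_samples=8, # number of hyperparameter configurations to try resources_per_trial={"cpu": 2, "gpu": 1}, metric="val_loss", mode="min", ) print("Best hyperparameters found:", analysis.best_config) ```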
And lastly, stay tuned for a potential Tensorflow implementation of [RAG](https://ai.facebook.com/blog/retrieval-augmented-generation-streamlining-the-creation-of-intelligent-natural-language-processing-models) on [Huggingface](https://huggingface.co/)! If you plan to try RAG+Ray integration out, please feel free to share your experiences on the [Ray Discourse](https://discuss.ray.io/) or join the [Ray community Slack](https://docs.google.com/forms/d/e/1FAIpQLSfAcoiLCHOguOm8e7Jnn-JJdZaCxPGjgVCvFijHB5PLaQLeig/viewform) for further discussion -- we’d love to hear from you! > Also published at https://medium.com/distributed-computing-with-ray/retrieval-augmented-generation-with-huggingface-transformers-and-ray-b09b56161b1e
blog/ray-rag.md/0
{ "file_path": "blog/ray-rag.md", "repo_id": "blog", "token_count": 3260 }
44
--- title: "We Raised $100 Million for Open & Collaborative Machine Learning 🚀" thumbnail: /blog/assets/65_series_c/thumbnail.jpg authors: - user: huggingface --- # We Raised $100 Million for Open & Collaborative Machine Learning 🚀 Today we have some exciting news to share! Hugging Face has raised $100 Million in Series C funding 🔥🔥🔥 led by Lux Capital with major participations from Sequoia, Coatue and support of existing investors Addition, a_capital, SV Angel, Betaworks, AIX Ventures, Kevin Durant, Rich Kleiman from Thirty Five Ventures, Olivier Pomel (co-founder & CEO at Datadog) and more. <figure class="image table text-center m-0 w-full"> <img src="/blog/assets/65_series_c/thumbnail.jpg" alt="Series C"/> </figure> We've come a long way since we first open sourced [PyTorch BERT](https://twitter.com/Thom_Wolf/status/1068637731281088513) in 2018 and are just getting started! 🙌 Machine learning is becoming the default way to build technology. When you think about your average day, machine learning is everywhere: from your Zoom background, to searching on Google, to ordering an Uber or writing an email with auto-complete --it's all machine learning. Hugging Face is now the fastest growing community & most used platform for machine learning! With 100,000 pre-trained models & 10,000 datasets hosted on the platform for NLP, computer vision, speech, time-series, biology, reinforcement learning, chemistry and more, the [Hugging Face Hub](https://huggingface.co/models) has become the Home of Machine Learning to create, collaborate, and deploy state-of-the-art models. <figure class="image table text-center m-0 w-full"> <img src="assets/65_series_c/home-of-machine-learning.png" alt="The Home of Machine Learning"/> </figure> Over 10,000 companies are now using Hugging Face to build technology with machine learning. Their Machine Learning scientists, Data scientists and Machine Learning engineers have saved countless hours while accelerating their machine learning roadmaps with the help of our [products](https://huggingface.co/platform) and [services](https://huggingface.co/support). We want to have a positive impact on the AI field. We think the direction of more responsible AI is through openly sharing models, datasets, training procedures, evaluation metrics and working together to solve issues. We believe open source and open science bring trust, robustness, reproducibility, and continuous innovation. With this in mind, we are leading [BigScience](https://bigscience.huggingface.co/), a collaborative workshop around the study and creation of very large language models gathering more than 1,000 researchers of all backgrounds and disciplines. We are now training the [world's largest open source multilingual language model](https://twitter.com/BigScienceLLM) 🌸 ⚠️ But there’s still a huge amount of work left to do. At Hugging Face, we know that Machine Learning has some important limitations and challenges that need to be tackled now like biases, privacy, and energy consumption. With openness, transparency & collaboration, we can foster responsible & inclusive progress, understanding & accountability to mitigate these challenges. Thanks to the new funding, we’ll be doubling down on research, open-source, products and responsible democratization of AI. <figure class="image table text-center m-0 w-full"> <img src="assets/65_series_c/team.png" alt="The Home of Machine Learning"/> </figure> It's been a hell of a ride to grow from 30 to 120+ team members in the past 12 months. 
We were super lucky to have been joined by incredibly talented (and fun!) teammates like [Dr. Margaret Mitchell](https://www.bloomberg.com/news/articles/2021-08-24/fired-at-google-after-critical-work-ai-researcher-mitchell-to-join-hugging-face) and the [Gradio team](https://gradio.app/joining-huggingface/), and we don't plan to stop here. We're [hiring for every position](https://apply.workable.com/huggingface) you can think of for every level of seniority. We are a remote-friendly, decentralized organization with transparency and value-inspired decision making by default. Huge thanks to every contributor in our amazing community and team, our customers, partners, and investors for helping us reach this point. We couldn't have done it without you, and we can't wait to work together with you on what's next. Your contributions are key to helping build a better future where AI is founded on open source, open science, ethics and collaboration. --- *For press inquiries, please contact <a href="mailto:team@huggingface.co">team@huggingface.co</a>*
blog/series-c.md/0
{ "file_path": "blog/series-c.md", "repo_id": "blog", "token_count": 1159 }
45
--- title: 🧨 Stable Diffusion in JAX / Flax ! thumbnail: /blog/assets/108_stable_diffusion_jax/thumbnail.png authors: - user: pcuenq - user: patrickvonplaten --- # 🧨 Stable Diffusion in JAX / Flax ! <a target="_blank" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion_jax_how_to.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> 🤗 Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version `0.5.1`! This allows for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform. This post shows how to run inference using JAX / Flax. If you want more details about how Stable Diffusion works or want to run it in GPU, please refer to [this Colab notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb). If you want to follow along, click the button above to open this post as a Colab notebook. First, make sure you are using a TPU backend. If you are running this notebook in Colab, select `Runtime` in the menu above, then select the option "Change runtime type" and then select `TPU` under the `Hardware accelerator` setting. Note that JAX is not exclusive to TPUs, but it shines on that hardware because each TPU server has 8 TPU accelerators working in parallel. ## Setup ``` python import jax num_devices = jax.device_count() device_type = jax.devices()[0].device_kind print(f"Found {num_devices} JAX devices of type {device_type}.") assert "TPU" in device_type, "Available device is not a TPU, please select TPU from Edit > Notebook settings > Hardware accelerator" ``` *Output*: ```bash Found 8 JAX devices of type TPU v2. ``` Make sure `diffusers` is installed. ``` python !pip install diffusers==0.5.1 ``` Then we import all the dependencies. ``` python import numpy as np import jax import jax.numpy as jnp from pathlib import Path from jax import pmap from flax.jax_utils import replicate from flax.training.common_utils import shard from PIL import Image from huggingface_hub import notebook_login from diffusers import FlaxStableDiffusionPipeline ``` ## Model Loading Before using the model, you need to accept the model [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) in order to download and use the weights. The license is designed to mitigate the potential harmful effects of such a powerful machine learning system. We request users to **read the license entirely and carefully**. Here we offer a summary: 1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content, 2. We claim no rights on the outputs you generate, you are free to use them and are accountable for their use which should not go against the provisions set in the license, and 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users. Flax weights are available in Hugging Face Hub as part of the Stable Diffusion repo. The Stable Diffusion model is distributed under the CreateML OpenRail-M license. It's an open license that claims no rights on the outputs you generate and prohibits you from deliberately producing illegal or harmful content. 
The [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) provides more details, so take a moment to read them and consider carefully whether you accept the license. If you do, you need to be a registered user in the Hub and use an access token for the code to work. You have two options to provide your access token: - Use the `huggingface-cli login` command-line tool in your terminal and paste your token when prompted. It will be saved in a file in your computer. - Or use `notebook_login()` in a notebook, which does the same thing. The following cell will present a login interface unless you've already authenticated before in this computer. You'll need to paste your access token. ``` python if not (Path.home()/'.huggingface'/'token').exists(): notebook_login() ``` TPU devices support `bfloat16`, an efficient half-float type. We'll use it for our tests, but you can also use `float32` to use full precision instead. ``` python dtype = jnp.bfloat16 ``` Flax is a functional framework, so models are stateless and parameters are stored outside them. Loading the pre-trained Flax pipeline will return both the pipeline itself and the model weights (or parameters). We are using a `bf16` version of the weights, which leads to type warnings that you can safely ignore. ``` python pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=dtype, ) ``` ## Inference Since TPUs usually have 8 devices working in parallel, we'll replicate our prompt as many times as devices we have. Then we'll perform inference on the 8 devices at once, each responsible for generating one image. Thus, we'll get 8 images in the same amount of time it takes for one chip to generate a single one. After replicating the prompt, we obtain the tokenized text ids by invoking the `prepare_inputs` function of the pipeline. The length of the tokenized text is set to 77 tokens, as required by the configuration of the underlying CLIP Text model. ``` python prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic" prompt = [prompt] * jax.device_count() prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids.shape ``` *Output*: ```bash (8, 77) ``` ### Replication and parallelization Model parameters and inputs have to be replicated across the 8 parallel devices we have. The parameters dictionary is replicated using `flax.jax_utils.replicate`, which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`. ``` python p_params = replicate(params) ``` ``` python prompt_ids = shard(prompt_ids) prompt_ids.shape ``` *Output*: ```bash (8, 1, 77) ``` That shape means that each one of the `8` devices will receive as an input a `jnp` array with shape `(1, 77)`. `1` is therefore the batch size per device. In TPUs with sufficient memory, it could be larger than `1` if we wanted to generate multiple images (per chip) at once. We are almost ready to generate images! We just need to create a random number generator to pass to the generation function. This is the standard procedure in Flax, which is very serious and opinionated about random numbers – all functions that deal with random numbers are expected to receive a generator. This ensures reproducibility, even when we are training across multiple distributed devices. The helper function below uses a seed to initialize a random number generator. 
As long as we use the same seed, we'll get the exact same results. Feel free to use different seeds when exploring results later in the notebook. ``` python def create_key(seed=0): return jax.random.PRNGKey(seed) ``` We obtain a rng and then "split" it 8 times so each device receives a different generator. Therefore, each device will create a different image, and the full process is reproducible. ``` python rng = create_key(0) rng = jax.random.split(rng, jax.device_count()) ``` JAX code can be compiled to an efficient representation that runs very fast. However, we need to ensure that all inputs have the same shape in subsequent calls; otherwise, JAX will have to recompile the code, and we wouldn't be able to take advantage of the optimized speed. The Flax pipeline can compile the code for us if we pass `jit = True` as an argument. It will also ensure that the model runs in parallel in the 8 available devices. The first time we run the following cell it will take a long time to compile, but subsequent calls (even with different inputs) will be much faster. For example, it took more than a minute to compile in a TPU v2-8 when I tested, but then it takes about **`7s`** for future inference runs. ``` python images = pipeline(prompt_ids, p_params, rng, jit=True)[0] ``` *Output*: ```bash CPU times: user 464 ms, sys: 105 ms, total: 569 ms Wall time: 7.07 s ``` The returned array has shape `(8, 1, 512, 512, 3)`. We reshape it to get rid of the second dimension and obtain 8 images of `512 × 512 × 3` and then convert them to PIL. ```python images = images.reshape((images.shape[0],) + images.shape[-3:]) images = pipeline.numpy_to_pil(images) ``` ### Visualization Let's create a helper function to display images in a grid. ``` python def image_grid(imgs, rows, cols): w,h = imgs[0].size grid = Image.new('RGB', size=(cols*w, rows*h)) for i, img in enumerate(imgs): grid.paste(img, box=(i%cols*w, i//cols*h)) return grid ``` ``` python image_grid(images, 2, 4) ``` ![png](assets/108_stable_diffusion_jax/jax_stable_diffusion_1.png) ## Using different prompts We don't have to replicate the *same* prompt in all the devices. We can do whatever we want: generate 2 prompts 4 times each, or even generate 8 different prompts at once. Let's do that! First, we'll refactor the input preparation code into a handy function: ``` python prompts = [ "Labrador in the style of Hokusai", "Painting of a squirrel skating in New York", "HAL-9000 in the style of Van Gogh", "Times Square under water, with fish and a dolphin swimming around", "Ancient Roman fresco showing a man working on his laptop", "Close-up photograph of young black woman against urban background, high quality, bokeh", "Armchair in the shape of an avocado", "Clown astronaut in space, with Earth in the background", ] ``` ``` python prompt_ids = pipeline.prepare_inputs(prompts) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, p_params, rng, jit=True).images images = images.reshape((images.shape[0], ) + images.shape[-3:]) images = pipeline.numpy_to_pil(images) image_grid(images, 2, 4) ``` ![png](assets/108_stable_diffusion_jax/jax_stable_diffusion_2.png) ------------------------------------------------------------------------ ## How does parallelization work? We said before that the `diffusers` Flax pipeline automatically compiles the model and runs it in parallel on all available devices. We'll now briefly look inside that process to show how it works. JAX parallelization can be done in multiple ways. 
The easiest one revolves around using the `jax.pmap` function to achieve single-program, multiple-data (SPMD) parallelization. It means we'll run several copies of the same code, each on different data inputs. More sophisticated approaches are possible; we invite you to go over the [JAX documentation](https://jax.readthedocs.io/en/latest/index.html) and the [`pjit` pages](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html?highlight=pjit) to explore this topic if you are interested! `jax.pmap` does two things for us: - Compiles (or `jit`s) the code, as if we had invoked `jax.jit()`. This does not happen when we call `pmap`, but the first time the pmapped function is invoked. - Ensures the compiled code runs in parallel on all the available devices. To show how it works, we `pmap` the `_generate` method of the pipeline, which is the private method that generates the images. Please note that this method may be renamed or removed in future releases of `diffusers`. ``` python p_generate = pmap(pipeline._generate) ``` After we use `pmap`, the prepared function `p_generate` will conceptually do the following: - Invoke a copy of the underlying function `pipeline._generate` on each device. - Send each device a different portion of the input arguments. That's what sharding is used for. In our case, `prompt_ids` has shape `(8, 1, 77)`. This array will be split into `8` and each copy of `_generate` will receive an input with shape `(1, 77)`. We can code `_generate` completely ignoring the fact that it will be invoked in parallel. We just care about our batch size (`1` in this example) and the dimensions that make sense for our code, and don't have to change anything to make it work in parallel. The same way as when we used the pipeline call, the first time we run the following cell it will take a while, but then it will be much faster. ``` python images = p_generate(prompt_ids, p_params, rng) images = images.block_until_ready() images.shape ``` *Output*: ```bash CPU times: user 118 ms, sys: 83.9 ms, total: 202 ms Wall time: 6.82 s (8, 1, 512, 512, 3) ``` We use `block_until_ready()` to correctly measure inference time, because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking will occur automatically when you want to use the result of a computation that has not yet been materialized. A tiny, standalone `pmap` example follows for readers who want to experiment with the pattern outside of the diffusion pipeline.
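The following sketch is not part of the original pipeline code — it is a minimal, self-contained illustration of the same map-over-the-leading-device-axis pattern, using plain arithmetic rather than image generation: ```python import jax import jax.numpy as jnp from jax import pmap n_devices = jax.device_count() # One shard of data per device: pmap maps the function over the leading axis. x = jnp.arange(n_devices * 4, dtype=jnp.float32).reshape(n_devices, 4) def normalize(row): # Runs on each device, operating only on that device's shard. return row / jnp.sum(row) normalized = pmap(normalize)(x) print(normalized.shape) # (n_devices, 4) ```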
blog/stable_diffusion_jax.md/0
{ "file_path": "blog/stable_diffusion_jax.md", "repo_id": "blog", "token_count": 3749 }
46
--- title: "Text-Generation Pipeline on Intel® Gaudi® 2 AI Accelerator" thumbnail: /blog/assets/textgen-pipe-gaudi/thumbnail.png authors: - user: siddjags guest: true --- # Text-Generation Pipeline on Intel® Gaudi® 2 AI Accelerator With the Generative AI (GenAI) revolution in full swing, text-generation with open-source transformer models like Llama 2 has become the talk of the town. AI enthusiasts as well as developers are looking to leverage the generative abilities of such models for their own use cases and applications. This article shows how easy it is to generate text with the Llama 2 family of models (7b, 13b and 70b) using Optimum Habana and a custom pipeline class – you'll be able to run the models with just a few lines of code! This custom pipeline class has been designed to offer great flexibility and ease of use. Moreover, it provides a high level of abstraction and performs end-to-end text-generation which involves pre-processing and post-processing. There are multiple ways to use the pipeline - you can run the `run_pipeline.py` script from the Optimum Habana repository, add the pipeline class to your own python scripts, or initialize LangChain classes with it. ## Prerequisites Since the Llama 2 models are part of a gated repo, you need to request access if you haven't done it already. First, you have to visit the [Meta website](https://ai.meta.com/resources/models-and-libraries/llama-downloads) and accept the terms and conditions. After you are granted access by Meta (it can take a day or two), you have to request access [in Hugging Face](https://huggingface.co/meta-llama/Llama-2-7b-hf), using the same email address you provided in the Meta form. After you are granted access, please login to your Hugging Face account by running the following command (you will need an access token, which you can get from [your user profile page](https://huggingface.co/settings/tokens)): ```bash huggingface-cli login ``` You also need to install the latest version of Optimum Habana and clone the repo to access the pipeline script. Here are the commands to do so: ```bash pip install optimum-habana==1.10.4 git clone -b v1.10-release https://github.com/huggingface/optimum-habana.git ``` In case you are planning to run distributed inference, install DeepSpeed depending on your SynapseAI version. In this case, I am using SynapseAI 1.14.0. ```bash pip install git+https://github.com/HabanaAI/DeepSpeed.git@1.14.0 ``` Now you are all set to perform text-generation with the pipeline! ## Using the Pipeline First, go to the following directory in your `optimum-habana` checkout where the pipeline scripts are located, and follow the instructions in the `README` to update your `PYTHONPATH`. ```bash cd optimum-habana/examples/text-generation pip install -r requirements.txt cd text-generation-pipeline ``` If you wish to generate a sequence of text from a prompt of your choice, here is a sample command. ```bash python run_pipeline.py --model_name_or_path meta-llama/Llama-2-7b-hf --use_hpu_graphs --use_kv_cache --max_new_tokens 100 --do_sample --prompt "Here is my prompt" ``` You can also pass multiple prompts as input and change the temperature and top_p values for generation as follows. ```bash python run_pipeline.py --model_name_or_path meta-llama/Llama-2-13b-hf --use_hpu_graphs --use_kv_cache --max_new_tokens 100 --do_sample --temperature 0.5 --top_p 0.95 --prompt "Hello world" "How are you?" 
``` For generating text with large models such as Llama-2-70b, here is a sample command to launch the pipeline with DeepSpeed. ```bash python ../../gaudi_spawn.py --use_deepspeed --world_size 8 run_pipeline.py --model_name_or_path meta-llama/Llama-2-70b-hf --max_new_tokens 100 --bf16 --use_hpu_graphs --use_kv_cache --do_sample --temperature 0.5 --top_p 0.95 --prompt "Hello world" "How are you?" "Here is my prompt" "Once upon a time" ``` ## Usage in Python Scripts You can use the pipeline class in your own scripts as shown in the example below. Run the following sample script from `optimum-habana/examples/text-generation/text-generation-pipeline`. ```python import argparse import logging from pipeline import GaudiTextGenerationPipeline from run_generation import setup_parser # Define a logger logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) # Set up an argument parser parser = argparse.ArgumentParser() args = setup_parser(parser) # Define some pipeline arguments. Note that --model_name_or_path is a required argument for this script args.num_return_sequences = 1 args.model_name_or_path = "meta-llama/Llama-2-7b-hf" args.max_new_tokens = 100 args.use_hpu_graphs = True args.use_kv_cache = True args.do_sample = True # Initialize the pipeline pipe = GaudiTextGenerationPipeline(args, logger) # You can provide input prompts as strings prompts = ["He is working on", "Once upon a time", "Far far away"] # Generate text with pipeline for prompt in prompts: print(f"Prompt: {prompt}") output = pipe(prompt) print(f"Generated Text: {repr(output)}") ``` > You will have to run the above script with `python <name_of_script>.py --model_name_or_path a_model_name` as `--model_name_or_path` is a required argument. However, the model name can be programatically changed as shown in the python snippet. This shows us that the pipeline class operates on a string input and performs data pre-processing as well as post-processing for us. ## LangChain Compatibility The text-generation pipeline can be fed as input to LangChain classes via the `use_with_langchain` constructor argument. You can install LangChain as follows. ```bash pip install langchain==0.0.191 ``` Here is a sample script that shows how the pipeline class can be used with LangChain. ```python import argparse import logging from langchain.llms import HuggingFacePipeline from langchain.prompts import PromptTemplate from langchain.chains import LLMChain from pipeline import GaudiTextGenerationPipeline from run_generation import setup_parser # Define a logger logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) # Set up an argument parser parser = argparse.ArgumentParser() args = setup_parser(parser) # Define some pipeline arguments. Note that --model_name_or_path is a required argument for this script args.num_return_sequences = 1 args.model_name_or_path = "meta-llama/Llama-2-13b-chat-hf" args.max_input_tokens = 2048 args.max_new_tokens = 1000 args.use_hpu_graphs = True args.use_kv_cache = True args.do_sample = True args.temperature = 0.2 args.top_p = 0.95 # Initialize the pipeline pipe = GaudiTextGenerationPipeline(args, logger, use_with_langchain=True) # Create LangChain object llm = HuggingFacePipeline(pipeline=pipe) template = """Use the following pieces of context to answer the question at the end. 
If you don't know the answer,\ just say that you don't know, don't try to make up an answer. Context: Large Language Models (LLMs) are the latest models used in NLP. Their superior performance over smaller models has made them incredibly useful for developers building NLP enabled applications. These models can be accessed via Hugging Face's `transformers` library, via OpenAI using the `openai` library, and via Cohere using the `cohere` library. Question: {question} Answer: """ prompt = PromptTemplate(input_variables=["question"], template=template) llm_chain = LLMChain(prompt=prompt, llm=llm) # Use LangChain object question = "Which libraries and model providers offer LLMs?" response = llm_chain(prompt.format(question=question)) print(f"Question 1: {question}") print(f"Response 1: {response['text']}") question = "What is the provided context about?" response = llm_chain(prompt.format(question=question)) print(f"\nQuestion 2: {question}") print(f"Response 2: {response['text']}") ``` > The pipeline class has been validated for LangChain version 0.0.191 and may not work with other versions of the package. ## Conclusion We presented a custom text-generation pipeline on Intel® Gaudi® 2 AI accelerator that accepts single or multiple prompts as input. This pipeline offers great flexibility in terms of model size as well as parameters affecting text-generation quality. Furthermore, it is also very easy to use and to plug into your scripts, and is compatible with LangChain. > Use of the pretrained model is subject to compliance with third party licenses, including the “Llama 2 Community License Agreement” (LLAMAV2). For guidance on the intended use of the LLAMA2 model, what will be considered misuse and out-of-scope uses, who are the intended users and additional terms please review and read the instructions in this link [https://ai.meta.com/llama/license/](https://ai.meta.com/llama/license/). Users bear sole liability and responsibility to follow and comply with any third party licenses, and Habana Labs disclaims and will bear no liability with respect to users’ use or compliance with third party licenses. To be able to run gated models like this Llama-2-70b-hf, you need the following: > * Have a HuggingFace account > * Agree to the terms of use of the model in its model card on the HF Hub > * set a read token > * Login to your account using the HF CLI: run huggingface-cli login before launching your script
blog/textgen-pipe-gaudi.md/0
{ "file_path": "blog/textgen-pipe-gaudi.md", "repo_id": "blog", "token_count": 2807 }
47
--- title: "Speculative Decoding for 2x Faster Whisper Inference" thumbnail: /blog/assets/whisper-speculative-decoding/thumbnail.png authors: - user: sanchit-gandhi --- # Speculative Decoding for 2x Faster Whisper Inference <a target="_blank" href="https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/speculative_decoding.ipynb"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/> </a> Open AI's [Whisper](https://openai.com/research/whisper) is a general purpose speech transcription model that achieves state-of-the-art results across a range of different benchmarks and audio conditions. The latest [large-v3](https://huggingface.co/openai/whisper-large-v3) model tops the [OpenASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard), ranking as the best open-source speech transcription model for English. The model also demonstrates strong multilingual performance, achieving less than 30% word error rate (WER) on 42 of the 58 languages tested in the Common Voice 15 dataset. While the transcription accuracy is exceptional, the inference time is very slow. A 1 hour audio clip takes upwards of 6 minutes to transcribe on a 16GB T4 GPU, even after leveraging inference optimisations like [flash attention](https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2), half-precision, and [chunking](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline.chunk_length_s). In this blog post, we demonstrate how Speculative Decoding can be employed to reduce the inference time of Whisper by a **factor of 2**, while mathematically ensuring exactly the **same outputs** are achieved from the model. As a result, this method provides a perfect drop-in replacement for existing Whisper pipelines, since it provides free 2x speed-up while maintaining the same accuracy. For a more streamlined version of the blog post with fewer explanations but all the code, see the accompanying [Google Colab](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/speculative_decoding.ipynb). ## Speculative Decoding Speculative Decoding was proposed in [Fast Inference from Transformers via Speculative Decoding](https://arxiv.org/abs/2211.17192) by Yaniv Leviathan et. al. from Google. It works on the premise that a faster, **assistant model** very often generates the same tokens as a larger **main model**. First, the assistant model auto-regressively generates a sequence of \\( N \\) *candidate tokens*, \\( \hat{\boldsymbol{y}}_{1:N} \\). In the diagram below, the assistant model generates a sequence of 5 candidate tokens: `The quick brown sock jumps`. <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_1.mp4" ></video> </figure> While these candidate tokens are generated quickly, they may differ from those predicted by the main model. Therefore, in the second step, the candidate tokens are passed to the main model to be "verified". The main model takes the candidate tokens as input and performs a **single forward pass**. The outputs of the main model are the "correct" token for each step in the token sequence \\( \boldsymbol{y}_{1:N} \\). 
<figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_2.mp4" ></video> </figure> In the diagram above, we see that the first three tokens predicted by the main model agree with those from the assistant model: <span style="color:green">The quick brown</span>. However, the fourth candidate token from the assistant model, <span style="color:red">sock</span>, mismatches with the correct token from the main model, <span style="color:green">fox</span>. We know that all candidate tokens up to the first mismatch are correct (<span style="color:green">The quick brown</span>), since these agree with the predictions from the main model. However, after the first mismatch, the candidate tokens diverge from the actual tokens predicted by the main model. Therefore, we can replace the first incorrect candidate token (<span style="color:red">sock</span>) with the correct token from the main model (<span style="color:green">fox</span>), and discard all predicted tokens that come after this, since these have diverged. The corrected sequence, `The quick brown fox`, now forms the new input to the assistant model: <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_3.mp4" ></video> </figure> The inference process then repeats, the assistant model generating a new set of \\( N \\) candidate tokens, which are verified in a single forward pass by the main model. <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" controls playsinline src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/whisper-speculative-decoding/split_4.mp4" ></video> </figure> Since we auto-regressively generate using the fast, assistant model, and only perform verification forward passes with the slow, main model, the decoding process is sped-up substantially. Furthermore, the verification forward passes performed by the main model ensures that **exactly the same outputs** are achieved as if we were using the main model standalone. This makes speculative decoding a perfect drop-in for existing Whisper pipelines, since one can be certain that the same quality will be attained. To get the biggest improvement in latency, the assistant model should be significantly faster than the main model, while predicting the same token distribution as often as possible. In practice, these two attributes form a trade-off: the faster a model is, the less accurate it is. However, since 70-80% of all predicted tokens tend to be "easier" tokens, this trade-off is heavily biased towards selecting a faster model, rather than a more accurate one. Thus, the assistant model should be at least 3x faster than the main model (the more the better), while predicting all the "easy" tokens in the examples correctly. The remaining 20-30% of more "difficult" tokens can then be verified by the larger, main model. The only constraint for selecting an assistant model is that it must share the same vocabulary as the main model. That is to say, the assistant model must use one-to-one the same tokenizer as the main model. Therefore, if we want to use speculative decoding with a multilingual variant of Whisper, e.g. 
[large-v2](https://huggingface.co/openai/whisper-large-v2) (multilingual), we need to select a multilingual variant of Whisper as the assistant model, e.g. [tiny](https://huggingface.co/openai/whisper-tiny). Whereas if we want to use speculative decoding with an English-only version of Whisper, e.g. [medium.en](https://huggingface.co/openai/whisper-medium.en), we need an English-only version as the assistant model, e.g. [tiny.en](https://huggingface.co/openai/whisper-tiny.en). At the current time, Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) is an exception, since it is the only Whisper checkpoint with an expanded vocabulary size, and thus is not compatible with previous Whisper checkpoints. Now that we know the background behind speculative decoding, we're ready to dive into the practical implementation. In the [🤗 Transformers](https://huggingface.co/docs/transformers/index) library, speculative decoding is implemented as the "assisted generation" inference strategy. For more details about the implementation, the reader is advised to read Joao Gante's excellent blog post on [Assisted Generation](https://huggingface.co/blog/assisted-generation). ## English Speech Transcription ### Baseline Implementation We start by benchmarking Whisper [large-v2](https://huggingface.co/openai/whisper-large-v2) to get our baseline number for inference speed. We can load the main model and its corresponding processor via the convenient [`AutoModelForSpeechSeq2Seq`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForSpeechSeq2Seq) and [`AutoProcessor`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoProcessor) classes. We'll load the model in `float16` precision and make sure that loading takes as little time as possible by passing [`low_cpu_mem_usage=True`](https://huggingface.co/docs/transformers/main_classes/model#large-model-loading). In addition, we want to make sure that the model is loaded in [safetensors](https://huggingface.co/docs/diffusers/main/en/using-diffusers/using_safetensors) format by passing [`use_safetensors=True`](https://huggingface.co/docs/transformers/main_classes/model#transformers.PreTrainedModel.from_pretrained.use_safetensors). Finally, we'll pass the argument `attn_implementation="sdpa"` to benefit from Flash Attention speed-ups through PyTorch's [SDPA attention kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html): ```python import torch from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor device = "cuda:0" if torch.cuda.is_available() else "cpu" torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 model_id = "openai/whisper-large-v2" model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) model.to(device) processor = AutoProcessor.from_pretrained(model_id) ``` Let's load the English speech transcription dataset that we will use for benchmarking. We'll load a small dataset consisting of 73 samples from the [LibriSpeech ASR](https://huggingface.co/datasets/librispeech_asr) validation-clean dataset.
This amounts to ~9MB of data, so it's very lightweight and quick to download on device: ```python from datasets import load_dataset dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ``` For the benchmark, we only want to measure the generation time, so let's write a short helper function that measures this step. The following function will return both the decoded tokens and the time it took to run the model: ```python import time def generate_with_time(model, inputs, **kwargs): start_time = time.time() outputs = model.generate(**inputs, **kwargs) generation_time = time.time() - start_time return outputs, generation_time ``` We can now iterate over the audio samples in our dataset and sum up the overall generation time: ```python from tqdm import tqdm all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = generate_with_time(model, inputs) all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["text"])) print(all_time) ``` **Output:** ``` 100%|██████████| 73/73 [01:37<00:00, 1.33s/it] 72.99542546272278 ``` Alright! We see that transcribing the 73 samples took 73 seconds. Let's check the WER of the predictions: ```python from evaluate import load wer = load("wer") print(wer.compute(predictions=predictions, references=references)) ``` **Output:** ``` 0.03507271171941831 ``` Our final baseline number is 73 seconds for a WER of 3.5%. ### Speculative Decoding Now let's load the assistant model for speculative decoding. In this example, we'll use a distilled variant of Whisper, [distil-large-v2](https://huggingface.co/distil-whisper/distil-large-v2). The distilled model copies the entire encoder from Whisper, but only 2 of the 32 decoder layers. As such, it runs 6x faster than Whisper, while performing to within 1% WER on out-of-distribution test sets. This makes it the perfect choice as an assistant model, since it has both high transcription accuracy and fast generation \\({}^1\\). Since Distil-Whisper uses exactly the same encoder as the Whisper model, we can share the encoder across the main and assistant models. We then only have to load the 2-layer decoder from Distil-Whisper as a "decoder-only" model. We can do this through the convenient [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) auto class. In practice, this results in only an 8% increase to VRAM over using the main model alone. ```python from transformers import AutoModelForCausalLM assistant_model_id = "distil-whisper/distil-large-v2" assistant_model = AutoModelForCausalLM.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) assistant_model.to(device) ``` ------------------------------------------------------------------------ \\({}^1\\) We intend to release an improved variant of Distil-Whisper with a stronger alignment in the token distribution that will improve speculative decoding performance further. Follow the [Distil-Whisper repository](https://github.com/huggingface/distil-whisper) for updates. 
------------------------------------------------------------------------ We can define a modified function for our speculative decoding benchmark. The only difference from the previous function is that we pass the assistant model to our call to `.generate`: ```python def assisted_generate_with_time(model, inputs, **kwargs): start_time = time.time() outputs = model.generate(**inputs, assistant_model=assistant_model, **kwargs) generation_time = time.time() - start_time return outputs, generation_time ``` Let's run the benchmark with speculative decoding, using Distil-Whisper as the assistant to Whisper: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = assisted_generate_with_time(model, inputs) all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["text"])) print(all_time) ``` **Outputs:** ``` 100%|██████████| 73/73 [00:38<00:00, 1.88it/s] 32.69683289527893 ``` With speculative decoding, the inference time was just 33 seconds, 2.2x faster than before! Let's verify we have the same WER: ```python print(wer.compute(predictions=predictions, references=references)) ``` **Outputs:** ``` 0.03507271171941831 ``` Perfect! 3.5% WER again, as we have identical outputs to using the main model standalone. Speculative decoding can also be used with the easy 🤗 Transformers [pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) API for inference. Below, we instantiate the pipeline using the model and processor, and then use it to transcribe the first sample from the toy dataset. This can be extended to transcribe audio samples of arbitrary length, including with the use of batching: ```python from transformers import pipeline pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, chunk_length_s=15, batch_size=4, generate_kwargs={"assistant_model": assistant_model}, torch_dtype=torch_dtype, device=device, ) sample = dataset[0]["audio"] result = pipe(sample) print(result["text"]) ``` **Outputs:** ``` Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel. ``` An end-to-end code snippet for running speculative decoding with Whisper and Distil-Whisper can be found on the [Distil-Whisper model card](https://huggingface.co/distil-whisper/distil-large-v2#speculative-decoding). It combines the stages of inference covered in this notebook into a single code example. ## Multilingual Speech Transcription Distil-Whisper is the perfect assistant model for English speech transcription, since it performs to within 1% WER of the original Whisper model, while being 6x faster over short and long-form audio samples. However, the official Distil-Whisper checkpoints are English only, meaning they cannot be used for multilingual speech transcription. To use speculative decoding for multilingual speech transcription, one could either use one of the [official multilingual Whisper checkpoints](https://huggingface.co/openai/whisper-large-v2#model-details), or a fine-tuned variant of Whisper. 
At the time of writing, there are over 5,000 [fine-tuned Whisper checkpoints](https://huggingface.co/models?other=whisper) on the Hugging Face Hub in over 100 languages. These provide an excellent starting point for selecting assistant Whisper checkpoints that perform very well on a single language. In this example, we'll use the smallest official multilingual checkpoint, Whisper [tiny](https://huggingface.co/openai/whisper-tiny). Feel free to experiment with different checkpoints fine-tuned in your language! Let's load the weights for our new assistant model, Whisper tiny. Since the encoder in Whisper tiny differs from that in large-v2, this time we'll load both the encoder and decoder using the `AutoModelForSpeechSeq2Seq` class: ```python assistant_model_id = "openai/whisper-tiny" assistant_model = AutoModelForSpeechSeq2Seq.from_pretrained( assistant_model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True, attn_implementation="sdpa", ) assistant_model.to(device); ``` For our benchmarking dataset, we'll load 73 samples from the Dutch ("nl") split of the [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) dataset: ```python dataset = load_dataset("sanchit-gandhi/voxpopuli_dummy", "nl", split="validation") ``` Great! We can now re-run our benchmark for our baseline Whisper large-v2 model as before. The only change we make is that we pass the language and task arguments to our generate function, in order to ensure we perform speech transcription (not speech translation). Speculative decoding is fully compatible with both the speech transcription and translation tasks. Simply set the task argument as required below: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = generate_with_time(model, inputs, language="nl", task="transcribe") all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["normalized_text"])) wer_result = wer.compute(predictions=predictions, references=references) print("Time:", all_time) print("WER:", wer_result) ``` **Outputs:** ``` 100%|██████████| 73/73 [02:05<00:00, 1.72s/it] Time: 116.50992178916931 WER: 0.127190136275146 ``` Right! We have our baseline time of 117 seconds and a WER of 12.8%. Let's re-run the generation process using speculative decoding: ```python all_time = 0 predictions = [] references = [] for sample in tqdm(dataset): audio = sample["audio"] inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt") inputs = inputs.to(device=device, dtype=torch.float16) output, gen_time = assisted_generate_with_time(model, inputs, language="nl", task="transcribe") all_time += gen_time predictions.append(processor.batch_decode(output, skip_special_tokens=True, normalize=True)[0]) references.append(processor.tokenizer._normalize(sample["normalized_text"])) wer_result = wer.compute(predictions=predictions, references=references) print("Time:", all_time) print("WER:", wer_result) ``` **Outputs:** ``` 100%|██████████| 73/73 [01:08<00:00, 1.06it/s] Time: 62.10229682922363 WER: 0.127190136275146 ``` Again, we achieve 12.8% WER, but this time in just 62 seconds of inference time, representing a speed-up of 1.9x. 
Given the low overhead of loading the assistant model and the mathematical guarantee that exactly the same outputs are achieved, speculative decoding offers the perfect drop-in replacement for existing Whisper pipelines.

## Strategies for Efficient Speculative Decoding

In this final section, we cover two strategies for ensuring the fastest possible inference time with speculative decoding.

#### Assistant Model

Our objective is to select an assistant model that is at least 3x faster than the main model **and** transcribes at least 70-80% of the predicted tokens correctly, typically the "easier" tokens in the examples. If you want to transcribe a particular language, an effective strategy is to train two Whisper models of different sizes, and use one as the assistant to the other:

* First, fine-tune Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) to act as your main model
* Second, distil Whisper [large-v3](https://huggingface.co/openai/whisper-large-v3) on the same dataset to act as a fast assistant model

Fine-tuning and distillation can improve the WER performance of both the main and assistant models on your chosen language, while maximising the alignment in the token distributions. A complete guide to Whisper fine-tuning can be found [here](https://huggingface.co/blog/fine-tune-whisper), and distillation [here](https://github.com/huggingface/distil-whisper/tree/main/training).

#### Batch Size

It is worth noting that the largest speed gains with speculative decoding come with a batch size of 1. For batched speculative decoding, all candidate tokens **across the batch** must match the validation tokens in order for the tokens to be accepted. If a token in the batch at a given position does not agree, all candidate tokens that follow that position are discarded. Consequently, speculative decoding favours lower batch sizes. In practice, we find that speculative decoding provides a speed-up up to a batch size of 4. Above batch size 4, speculative decoding is slower than running the main model alone. For full results, refer to Section D.3 of the [Distil-Whisper paper](https://arxiv.org/pdf/2311.00430.pdf).

## Conclusion

In this blog post, we covered the inference strategy of speculative decoding, as applied to the Whisper model for speech transcription. We demonstrated how 2x speed-ups can be achieved, while mathematically ensuring the same outputs as using the original model alone.

We encourage you to try speculative decoding as a drop-in replacement for existing Whisper pipelines, given the low overhead of using the additional assistant model and the guarantee of the same transcription results.

## Acknowledgements

Blog post by [Sanchit Gandhi](https://huggingface.co/sanchit-gandhi). Many thanks to [Patrick von Platen](https://huggingface.co/patrickvonplaten) and [Pedro Cuenca](https://huggingface.co/pcuenq) for their constructive comments, and to [Joao Gante](https://huggingface.co/joaogante) for the assisted generation implementation in 🤗 Transformers.
blog/whisper-speculative-decoding.md/0
{ "file_path": "blog/whisper-speculative-decoding.md", "repo_id": "blog", "token_count": 7201 }
48
--- title: "如何成功将 🤗 API 客户的 transformer 模型推理速度加快 100 倍" thumbnail: /blog/assets/09_accelerated_inference/thumbnail.png translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 如何成功将 🤗 API 客户的 transformer 模型推理速度加快 100 倍 🤗 Transformers 已成为世界各地数据科学家用以探索最先进 NLP 模型、构建新 NLP 模块的默认库。它拥有超过 5000 个预训练和微调的模型,支持 250 多种语言,任君取用。无论你使用哪种框架,都能用得上它。 虽然在 🤗 Transformers 中试验模型很容易,但以最高性能将这些大模型部署到生产中,并将它们用可扩展的架构管理起来,对于任何机器学习工程师来说都是一个 **艰巨的工程挑战**。 100 倍性能提升及内置可扩展性是用户选择在我们托管的 [Accelerated Inference API](https://huggingface.co/pricing) 基础上构建自己的 NLP 模块的原因。尤其是为了实现 **最后那 10 倍性能** 提升,我们需要进行底层的、特定于模型且特定于目标硬件的优化。 本文分享了我们为用户充分榨干每一滴计算资源所使用的一些方法。 🍋 ## 获取首个 10 倍加速 优化之旅的第一站相对来讲是最容易的,主要涉及到 [Hugging Face 库](https://github.com/huggingface/) 提供的所有平台无关的优化技术。 我们在 Hugging Face 模型的 [流水线 (`pipeline` )](https://huggingface.co/transformers/main_classes/pipelines.html) 中集成了能有效减少每次前向传播计算量的最佳方法。这些方法因模型架构和目标任务不同而不同,例如,对基于 GPT 架构的模型的文本生成任务,我们通过缓存过去时刻的注意力矩阵,而仅计算每一轮中最后一个新词元的注意力,来减小参与计算的注意力矩阵的维度: -| 原始版 | 优化版 | -|:---------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------:| -|![](/blog/assets/09_accelerated_inference/unoptimized_graph.png)|![](/blog/assets/09_accelerated_inference/optimized_graph.png)| 分词常常成为推理效率的瓶颈。我们在 [🤗 Tokenizers](https://github.com/huggingface/tokenizers/) 库中实现了高效的算法,用 Rust 来实现模型分词器并与智能缓存技术相结合,获得了高达 10 倍的端到端延迟加速。 利用 Hugging Face 库的最新功能,在相同的模型及硬件上,与开箱即用的部署相比,我们稳定达到了 10 倍加速。由于 Transformer 和 Tokenizer 通常每月都会发版,因此我们的 API 客户无需不断适配新的优化,即可让自己的模型越跑越快。 ## 为了胜利而编译: 10 倍加速硬核技术 现在到真正棘手的地方了。为了获得最佳性能,我们需要修改模型并针对特定硬件进行编译以优化推理速度。选择什么硬件取决于模型 (内存大小) 和需求情况 (对请求进行组批)。即使是使用相同的模型来进行预测,一些 API 客户可能会更受益于 CPU 推理加速,而其他客户可能会更受益于 GPU 推理加速,而每种硬件会涉及不同的优化技术以及库。 一旦为针对应用场景选定计算平台,我们就可以开始工作了。以下是一些可应用于静态图的针对 CPU 的优化技术: - 图优化 (删除无用节点和边) - 层融合 (使用特定的 CPU 算子) - 量化 使用开源库中的开箱即用功能 (例如 🤗 Transformers 结合 [ONNX Runtime](https://github.com/microsoft/onnxruntime)) 很难得到最佳的结果,或者会有明显的准确率损失,特别是在使用量化方法时。没有什么灵丹妙药,每个模型架构的最佳优化方案都不同。但深入研究 Transformers 代码和 ONNX Runtime 文档,星图即会显现,我们就能够组合出适合目标模型和硬件的额外的 10 倍加速方案。 ## 不公平的优势 从 NLP 起家的 Transformer 架构是机器学习性能的决定性转折点,在过去 3 年中,自然语言理解和生成的进展急剧加快,同时水涨船高的是模型的平均大小,从 BERT 的 110M 参数到现在 GPT-3 的 175B 参数。 这种趋势给机器学习工程师将最新模型部署到生产中带来了严峻的挑战。虽然 100 倍加速是一个很高的标准,但惟有这样才能满足消费级应用对实时性的需求。 为了达到这个标准,作为 Hugging Face 的机器学习工程师,我们与 🤗 Transformers 和 🤗 Tokenizers 维护人员 😬 相邻而坐,相对其他机器学习工程师而言当然拥有不公平的优势。更幸运的是,通过与英特尔、英伟达、高通、亚马逊和微软等硬件及云供应商的开源合作建立起的广泛合作伙伴关系,我们还能够使用最新的硬件优化技术来优化我们的模型及基础设施。 如果你想感受我们基础设施的速度,可以 [免费试用](https://huggingface.co/pricing) 一下,我们也会与你联系。 如果你想在自己的基础设施实施我们的推理优化,请加入我们的 [🤗 专家加速计划](https://huggingface.co/support)。
blog/zh/accelerated-inference.md/0
{ "file_path": "blog/zh/accelerated-inference.md", "repo_id": "blog", "token_count": 3295 }
49
--- title: "数据好合:Argilla 和 Hugging Face Spaces 赋能社区合力构建更好的数据集" thumbnail: /blog/assets/community-datasets/thumbnail.png authors: - user: davanstrien - user: dvilasuero guest: true translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 数据好合: Argilla 和 Hugging Face Spaces 携手赋能社区合力构建更好的数据集 最近,Argilla 和 Hugging Face 共同 [推出](https://huggingface.co/posts/dvilasuero/680660181190026) 了 `Data is Better Together` 计划,旨在凝聚社区力量协力构建一个对提示进行排名的偏好数据集。仅用几天,我们就吸引了: - 350 个社区贡献者参与数据标注 - 超过 11,000 个提示评分 你可通过 [进度面板](https://huggingface.co/spaces/DIBT/prompt-collective-dashboard) 了解最新的统计数据! 基于此,我们发布了 [`10k_prompts_ranked`](https://huggingface.co/datasets/DIBT/10k_prompts_ranked) 数据集,该数据集共有 1 万条提示,其中每条提示都附带用户的质量评分。我们希望后续能开展更多类似的项目! 本文将讨论为什么我们认为社区合作构建数据集至关重要,并邀请大家作为首批成员加入社区,[Argilla](https://argilla.io/) 和 Hugging Face 将共同支持社区开发出更好的数据集! ## “无数据,不模型”仍是颠扑不破的真理 数据对于训练出更好的模型仍具有至关重要的作用: [现有的研究](https://huggingface.co/papers/2402.05123) 及开源 [实验](https://argilla.io/blog/notus7b) 不断地证明了这一点,开源社区的实践也表明更好的数据才能训练出更好的模型。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/17480bfba418032faec37da19e9c678ac9eeed43/blog/community-datasets/why-model-better.png" alt="Screenshot of datasets in the Hugging Face Hub"><br> <em>问题</em> </p> <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/17480bfba418032faec37da19e9c678ac9eeed43/blog/community-datasets/data-is-the-answer.png" alt="Screenshot of datasets in the Hugging Face Hub"><br> <em>常见答案</em> </p> ## 为什么需要社区合力构建数据集? “数据对于机器学习至关重要”已获得广泛共识,但现实是对很多语言、领域和任务而言,我们仍然缺乏用于训练、评估以及基准测试的高质量数据集。解决这一问题的路径之一是借鉴 Hugging Face Hub 的经验,目前,社区已通过 Hugging Face Hub 共享了数千个模型、数据集及演示应用,开放的 AI 社区协力创造了这一令人惊叹的成果。我们完全可以将这一经验推广,促成社区协力构建下一代数据集,从而为构建下一代模型提供独特而宝贵的数据基础。 赋能社区协力构建和改进数据集得好处有: - 无需任何机器学习或编程基础,人人皆能为开源机器学习的发展作出贡献。 - 可为特定语言创建聊天数据集。 - 可为特定领域开发基准数据集。 - 可创建标注者多样化的偏好数据集。 - 可为特定任务构建数据集。 - 可利用社区的力量协力构建全新的数据集。 重要的是,我们相信凭借社区的协力会构建出更好的数据集,同时也能让那些不会编码的人也能参与进来为 AI 的发展作贡献。 ### 让人人都能参与 之前许多协力构建 AI 数据集的努力面临的挑战之一是如何赋能大家以高效完成标注任务。Argilla 作为一个开源工具,可让大家轻松地为 LLM 或小型特化模型创建数据集,而 Hugging Face Spaces 是一个用于构建和托管机器学习演示应用的平台。最近,Argilla 对 Spaces 上托管的 Argilla 实例增加了对 Hugging Face 账户验证的支持,有了这个,用户现在仅需几秒钟即可开始参与标注任务。 <figure class="image table text-center m-0 w-full"> <video style="max-width: 90%; margin: auto;" autoplay loop muted playsinline src="https://video.twimg.com/ext_tw_video/1757693043619004416/pu/vid/avc1/1068x720/wh3DyY0nMcRJaMki.mp4?tag=12" ></video> </figure> 我们在创建 [`10k_prompts_ranked`](https://huggingface.co/datasets/DIBT/10k_prompts_ranked) 数据集时已对这个新的工作流进行了压力测试,我们已准备好支持社区创建新的协作数据集。 ## 首批加入数据集共建社区! 我们对这个新的、简单的托管标注工作流的潜力深感兴奋。为了支持社区构建更好的数据集,Hugging Face 和 Argilla 邀请感兴趣的个人或社区作为首批成员加入我们的数据集构建者社区。 加入这个社区,你将可以: - 创建支持 Hugging Face 身份验证的 Argilla Space。 Hugging Face 将为参与者提供免费的硬盘和增强型 CPU 资源。 - Argilla 和 Hugging Face 可提供额外的宣传渠道以助力项目宣传。 - 受邀加入相应的社区频道。 我们的目标是支持社区协力构建更好的数据集。我们对所有想法持开放态度,并愿竭尽所能支持社区协力构建更好的数据集。 ## 我们在寻找什么样的项目? 我们愿意支持各种类型的项目,尤其是现存的开源项目。我们对专注于为目前开源社区中数据不足的语言、领域和任务构建数据集的项目尤其感兴趣。当前我们唯一的限制是主要针对文本数据集。如果你对多模态数据集有好想法,我们也很乐意听取你的意见,但我们可能无法在第一批中提供支持。 你的任务可以是完全开放的,也可以是向特定 Hugging Face Hub 组织的成员开放的。 如果你想成为首批成员,请加入 [Hugging Face Discord](http://hf.co/join/discord) 中的 `#data-is-better-together` 频道,并告诉我们你想构建什么数据集! 期待与大家携手共建更好的数据集!
blog/zh/community-datasets.md/0
{ "file_path": "blog/zh/community-datasets.md", "repo_id": "blog", "token_count": 3753 }
50
--- title: "开发 Diffusers 库的道德行为指南" thumbnail: /blog/assets/ethics-diffusers/thumbnail.png authors: - user: giadap translators: - user: innovation64 - user: zhongdongy proofreader: true --- # 开发 Diffusers 库的道德行为指南 我们正在努力让我们每次发布的库更加负责! 我们很荣幸宣布我们发布了 [道德守则](https://huggingface.co/docs/diffusers/main/en/conceptual/ethical_guidelines),并将作为一部分其放入 [ Diffusers 库的说明文档](https://huggingface.co/docs/diffusers/main/en/index)。 由于扩散模型在现实世界上的实际应用例子会对社会造成潜在的负面影响,该守则旨在引导对于社区做出贡献的 Diffusers 库维护者进行技术决策。我们希望对于我们的决策进行更加透明,尤其是,我们想确认一些价值观来指导决策。 我们将道德准则作为一个引导价值,做出具体行动,然后持续适应新的条件的循环过程。基于此,我们致力于随着时间去不断更正我们的价值准则,不断跟进 Diffusers 项目的发展,并从社区持续收集反馈,使得准则始终保持有效。 # 道德守则 - **透明**: 我们致力于在管理 PR、向用户解释我们的选择以及做出技术决策方面保持透明。 - **一致性**: 我们致力于保证我们的用户在项目管理中得到同等程度的关注,保持技术上的稳定和一致。 - **简单性**: 为了让 Diffusers 库易于使用和利用,我们致力于保持项目目标的精简和连贯性。 - **可访问性**: Diffusers 项目帮助更多贡献者降低进入门槛即便没有专业技术也可以运行项目。这样做使得社区更容易获得研究成果。 - **可再现性**: 我们的目标是在使用 Diffusers 库时,使上游代码、模型和数据集的可再现性保持透明。 - **责任**: 作为一个社区,通过团队合作,我们通过预测和减轻该技术的潜在风险和危险来对我们的用户承担集体责任。 # 安全特性和机制 此外,我们提供了一个暂不全面的并希望不断扩展的列表,该列表是关于 Hugging Face 团队和更广泛的社区的实施的安全功能和机制。 - **[社区选项](https://huggingface.co/docs/hub/repositories-pull-requests-discussions)**: 它使社区能够讨论并更好地协作项目。 - **标签功能**: 仓库的作者可以将他们的内容标记为“不适合所有人” - **偏差探索和评估**: Hugging Face 团队提供了一个 [Space](https://huggingface.co/spaces/society-ethics/DiffusionBiasExplorer) 以交互方式演示 Stable Diffusion 和 DALL-E 中的偏差。从这个意义上说,我们支持和鼓励有偏差的探索和评估。 - **鼓励安全部署** - **[Safe Stable Diffusion](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion_safe)**: 它缓解了众所周知的问题,像 Stable Diffusion,在未经过滤的,网络抓取的数据集上训练的模型往往会遭受不当的退化。相关论文: [Safe Latent Diffusion: Mitigating Inappropriate Degeneration in Diffusion Models](https://arxiv.org/abs/2211.05105). - **在 Hub 上分阶段发布**: 特别在敏感的情况下,应限制对某些仓库的访问。这是发布阶段的一个中间步骤,允许仓库的作者对其使用有更多的控制权限。 - **许可**: [OpenRAILs](https://huggingface.co/blog/open_rail), 是一种新型许可,可让我们确保自由访问,同时拥有一组限制,以确保更多负责任的用途。
blog/zh/ethics-diffusers.md/0
{ "file_path": "blog/zh/ethics-diffusers.md", "repo_id": "blog", "token_count": 2223 }
51
--- title: "使用 Transformers 进行图分类" thumbnail: /blog/assets/125_intro-to-graphml/thumbnail_classification.png authors: - user: clefourrier translators: - user: MatrixYao --- # 使用 Transformers 进行图分类 <div class="blog-metadata"> <small>Published April 14, 2023.</small> <a target="_blank" class="btn no-underline text-sm mb-5 font-sans" href="https://github.com/huggingface/blog/blob/main/graphml-classification.md"> Update on GitHub </a> </div> <div class="author-card"> <a href="/clefourrier"> <img class="avatar avatar-user" src="https://aeiljuispo.cloudimg.io/v7/https://s3.amazonaws.com/moonup/production/uploads/1644340617257-noauth.png?w=200&h=200&f=face" title="Gravatar"> <div class="bfc"> <code>clefourrier</code> <span class="fullname">Clémentine Fourrier</span> </div> </a> </div> 在之前的[博文](https://huggingface.co/blog/intro-graphml)中,我们探讨了图机器学习的一些理论知识。这一篇我们将探索如何使用 Transformers 库进行图分类。(你也可以从[此处](https://github.com/huggingface/blog/blob/main/notebooks/graphml-classification.ipynb)下载演示 notebook,跟着一起做!) 目前,Transformers 中唯一可用的图 transformer 模型是微软的 [Graphormer](https://arxiv.org/abs/2106.05234),因此本文的例子将会基于该模型。我们期待看到大家会使用并集成哪些其他模型进 🤗。 ## 软件 要学习本教程,需要安装 `datasets` 和 `transformers`(版本号 >= 4.27.2),你可以使用 `pip install -U datasets transformers` 来安装。 ## 数据 你可以使用自己的图数据集,也可以使用 [Hub 上已有的数据集](https://huggingface.co/datasets?task_categories=task_categories:graph-ml&sort=downloads)。本文我们主要使用已有的数据集,你也可以随时[添加你的数据集](https://huggingface.co/docs/datasets/upload_dataset)到 Hugging Face! ### 数据加载 从 Hub 加载图数据集非常简单。这里,我们加载 OGB 库中的 `ogbg-mohiv` 数据集(该数据集是斯坦福 [开放图基准(Open Graph Benchmark,OGB)](https://ogb.stanford.edu/) 的一部分): ```python from datasets import load_dataset # There is only one split on the hub dataset = load_dataset("OGB/ogbg-molhiv") dataset = dataset.shuffle(seed=0) ``` 这个数据集含三个拆分,`train`、`validation` 和 `test`,所有这些拆分每一行都表示一个图,每个图包含 5 个数据列( `edge_index`、`edge_attr`、`y`、`num_nodes`、`node_feat` ),你可以通过执行 `print(dataset)` 来查看。 如果你还安装了其他图处理库,你还可以用这些库把图可视化出来,并进一步检查数据集。例如,使用 PyGeometric 和 matplotlib: ```python import networkx as nx import matplotlib.pyplot as plt # We want to plot the first train graph graph = dataset["train"][0] edges = graph["edge_index"] num_edges = len(edges[0]) num_nodes = graph["num_nodes"] # Conversion to networkx format G = nx.Graph() G.add_nodes_from(range(num_nodes)) G.add_edges_from([(edges[0][i], edges[1][i]) for i in range(num_edges)]) # Plot nx.draw(G) ``` ### 格式 在 Hub 上,图数据集主要存储为图列表形式(使用 `jsonl` 格式)。 单个图表示为一个字典,以下是我们图分类数据集的理想格式: - `edge_index` 包含图上每条边对应的节点 ID,存储为包含两个`节点列表`的列表(即由一个源节点列表和一个目的节点列表组成的列表)。 - **类型**:2个整数列表的列表。 - **示例**:包含四个节点(0、1、2 和 3)且连接为 1->2、1->3 和 3->1 的图将具有 `edge_index = [[1, 1, 3]、[2、3、1]]`。你可能会注意到此处不存在节点 0,因为在本数据中它与其他节点无边连接。这就是下一个属性很重要的原因。 - `num_nodes` 表示图中可用节点的数目(默认情况下,假定节点按顺序编号)。 - **类型**:整数 - **示例**:在上例中,`num_nodes = 4`。 - `y` 每个图的预测标签(可以是类、属性值或是不同任务的多个二分类标签)。 - **Type**:整数列表(用于多分类)、浮点数(用于回归)或 0/1 列表(用于二元多任务分类) - **示例**:我们可以预测图规模(小 = 0,中 = 1,大 = 2)。本例中,`y = [0]`。 - `node_feat` 包含图中每个节点的可用特征(如果存在),按节点 ID 排序。 - **类型**:整数列表的列表(可选) - **例子**:如上例中的节点可以有一些类型特征(就像分子图中的节点是不同的原子,不同的原子有不同的类型一样)。打比方,本例中 `node_feat = [[1], [0], [1], [1]]`。 - `edge_attr` 包含图中每条边的可用属性(如果存在),按 `edge_index` 排序。 - **类型**:整数列表的列表(可选) - **例子**:仍使用上例,边也可以有类型(如分子中的键),如 edge_attr = [[0], [1], [1]]`。 ### 预处理 图 transformer 框架通常需要根据数据集进行特定的预处理,以生成有助于目标学习任务(在我们的案例中为分类)的特征和属性。 在这里,我们使用 `Graphormer` 的默认预处理,它生成进度/出度信息、节点间的最短路径以及模型感兴趣的其他属性。 ```python from transformers.models.graphormer.collating_graphormer import preprocess_item, GraphormerDataCollator dataset_processed = dataset.map(preprocess_item, 
batched=False) ``` 我们也可以在 `DataCollat​​or` 的参数中动态进行预处理(通过将 `on_the_fly_processing` 设置为 True)。但并非所有数据集都像 `ogbg-molhiv` 那样小,对于大图,动态预处理成本太高,因此需要预先进行预处理,并存储预处理后的数据供后续训练实验使用。 ## 模型 ### 模型加载 这里,我们加载一个已有的预训练模型及其 checkpoint 并在我们的下游任务上对其进行微调,该任务是一个二分类任务(因此 `num_classes = 2` )。我们还可以在回归任务 (`num_classes = 1`) 或多任务分类上微调我们的模型。 ```python from transformers import GraphormerForGraphClassification model = GraphormerForGraphClassification.from_pretrained( "clefourrier/pcqm4mv2_graphormer_base", num_classes=2, # num_classes for the downstream task ignore_mismatched_sizes=True, ) ``` 我们来看下细节。 在代码中调用 `from_pretrained` 方法来下载并缓存模型权重。由于类的数量(用于预测)取决于数据集,我们将新的 `num_classes` 和`ignore_mismatched_sizes` 与 `model_checkpoint` 一起传给该函数。这会触发函数创建一个自定义的、特定于该下游任务的分类头,这个头与原模型中的解码器头很可能是不同的。 我们也可以创建一个新的随机初始化的模型来从头开始训练,此时,我们既可以复用给定检查点的超参配置,也可以自己手动选择超参配置。 ### 训练或微调 为了简化模型训练,我们使用 `Trainer`。我们需要定义训练相关的配置以及评估指标来实例化 `Trainer`。我们主要使用 `TrainingArguments`类,这是一个包含所有配置项的类,用于定制训练配置。我们要给它一个文件夹名称,用于保存模型的 checkpoint。 ```python from transformers import TrainingArguments, Trainer training_args = TrainingArguments( "graph-classification", logging_dir="graph-classification", per_device_train_batch_size=64, per_device_eval_batch_size=64, auto_find_batch_size=True, # batch size can be changed automatically to prevent OOMs gradient_accumulation_steps=10, dataloader_num_workers=4, #1, num_train_epochs=20, evaluation_strategy="epoch", logging_strategy="epoch", push_to_hub=False, ) ``` 对于图数据集,调整 batch size 和梯度累积步数来保证有效 batch size 够大同时又要避免内存不足,这件事尤为重要。 最后一个参数 `push_to_hub` 允许 `Trainer` 在训练期间定期将模型推送到 Hub,这个通常由保存步长来决定。 ```python trainer = Trainer( model=model, args=training_args, train_dataset=dataset_processed["train"], eval_dataset=dataset_processed["validation"], data_collator=GraphormerDataCollator(), ) ``` 在用于图分类的 `Trainer` 中,对给定的图数据集使用正确的数据整理器(data collator)很重要,这个数据整理器会将图转换为用于训练的 batch 数据。 ```python train_results = trainer.train() trainer.push_to_hub() ``` 训练完后,可以使用 `push_to_hub` 将模型与所有其他训练相关信息一起保存到 hub。 由于此模型比较大,因此在 CPU (Intel Core i7) 上训练/微调 20 个 epoch 大约需要一天时间。想要更快点的话,你可以使用强大的 GPU 和并行化方法,你只需在 Colab notebook 中或直接在你选择的其他集群上启动代码即可。 ## 结束语 现在你已经知道如何使用 `transformers` 来训练图分类模型,我们希望你尝试在 Hub 上分享你最喜欢的图 transformer 模型的 checkpoints、模型以及数据集,以供社区的其他人使用!
blog/zh/graphml-classification.md/0
{ "file_path": "blog/zh/graphml-classification.md", "repo_id": "blog", "token_count": 5420 }
52
--- title: "使用英特尔 Sapphire Rapids 加速 PyTorch Transformers 模型(第一部分)" thumbnail: /blog/assets/124_intel_sapphire_rapids/02.png authors: - user: juliensimon translators: - user: MatrixYao - user: inferjay proofreader: true --- # 使用英特尔 Sapphire Rapids 加速 PyTorch Transformers 模型(第一部分) 大约一年以前,我们 [展示](https://huggingface.co/blog/accelerating-pytorch) 了如何在第三代 [英特尔至强可扩展](https://www.intel.com/content/www/us/en/products/details/processors/xeon/scalable.html) CPU(即 Ice Lake)集群上分布式训练 Hugging Face transformers 模型。最近,英特尔发布了代号为 Sapphire Rapids 的第四代至强可扩展 CPU,该 CPU 包含了令人兴奋的深度学习加速新指令。 通过本文,你将会学到如何在一个 AWS Sapphire Rapids 集群上加速一个 PyTorch 训练任务。我们会使用 [英特尔 oneAPI 集合通信库](https://www.intel.com/content/www/us/en/developer/tools/oneapi/oneccl.html)(oneAPI Collective Communications Library, oneCCL)来分布式化训练任务,并使用 [英特尔 PyTorch 扩展库](https://github.com/intel/intel-extension-for-pytorch)(Intel Extension for PyTorch,IPEX)来自动使用新指令进行性能优化。因为这两个库均已集成入 Hugging Face transformers 库,我们能够做到在不修改一行代码的前提下开箱即用地运行我们的示例代码。 在随后的另一篇文章里,我们还会探讨如何使用 Sapphire Rapids CPU 进行推理及其性能提升。 ## 为何你应该考虑在 CPU 上训练 在英特尔至强 CPU 上训练一个深度学习模型是一个性价比高且可扩展的方案,在使用分布式训练或者在小数据集或中等数据集上微调模型时尤其如此。 至强 CPU 支持一些先进的特性,如 512 位先进矢量扩展(Advanced Vector Extensions,[AVX-512](https://en.wikipedia.org/wiki/AVX-512))以及超线程(Hyper-Threading),这些特性提高了深度学习模型的并行性和效率,使得我们可以在得到更好的硬件资源使用率的同时训练得更快。 另外,一般而言,相比用于训练大型深度学习模型的专门硬件如 GPU 等而言,至强 CPU 更便宜和易得。至强 CPU 还更容易用于其他生产任务,从网络服务到数据库不一而足,这使得它们成为 IT 基础设施的一个万用且灵活的选择。 最后,云用户还可以通过使用 spot 实例的方式进一步降低在至强 CPU 上的训练成本。Spot 实例使用空闲计算资源,因此以折扣价售卖。与按需实例相比,spot 实例提供了高至 90% 的显著的成本节约。最后同样重要的是,CPU spot 实例一般来讲比 GPU 实例更容易获得。 现在,让我们看一下 Sapphire Rapids 架构引入的新指令。 ## 先进矩阵扩展 (AMX):深度学习新指令 Sapphire Rapids 架构引入了英特尔先进矩阵扩展(Advanced Matrix Extensions, [AMX](https://en.wikipedia.org/wiki/Advanced_Matrix_Extensions))用于加速深度学习工作负载。用户只需安装最新版本的 IPEX 即可受益于新指令,无需更改任何 Hugging Face 代码。 AMX 指令用于加速矩阵乘法,该操作是深度学习批量训练的核心操作。AMX 指令支持 Brain 浮点([BF16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format))和 8 比特整型(INT8)数据类型,覆盖不同训练场景的加速需求。 AMX 指令引入了新的 2 维 CPU 寄存器,称作 tile 寄存器。因为这些寄存器在上下文切换时需要保存和恢复,所以需要内核相关支持。在 Linux 上,内核版本需要在 [v5.16](https://discourse.ubuntu.com/t/kinetic-kudu-release-notes/27976) 及以上方可支持。 现在,让我们看看怎样构建一个 Sapphire Rapids CPU 集群用于分布式训练。 ## 构建一个 Sapphire Rapids CPU 集群 截至本文撰写之时,使用 Sapphire Rapids 服务器的最简单的方式是使用新的亚马逊 EC2 [R7iz](https://aws.amazon.com/ec2/instance-types/r7iz/) 实例家族。由于它尚在预览期,你必须 [登记注册](https://pages.awscloud.com/R7iz-Preview.html) 以获得访问权限。另外,虚拟机尚未支持 AMX,因此,我们将使用裸金属实例(`r7iz.metal-16xl`, 64 vCPU, 512GB RAM)。 为避免手动设置集群中的每个节点,我们首先建立一个主节点并依此创建一个新的亚马逊机器镜像(Amazon Machine Image,[AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html))。然后,我们用这个 AMI 启动其他节点。 从网络的角度,我们需要如下设置: * 打开 22 端口,用于所有实例上的 ssh 访问创建和调试 * 配置从主实例(你启动训练的那个实例)到所有其他实例(包含主实例本身)的 [免密 ssh 访问](https://www.redhat.com/sysadmin/passwordless-ssh)。换句话说,主节点的 ssh 公钥必须在所有阶段上被授权 * 允许集群内的所有网络通信,使得分布式训练可以不受阻碍地运行。AWS 提供了 [安全组](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html) 这一安全便捷的方式支持这个功能。我们只需创建一个安全组,确保所有集群内的实例属于同一安全组,并允许同一安全组内的所有网络通信即可,以下是我使用的设置: <kbd> <img src="../assets/124_intel_sapphire_rapids/01.png"> </kbd> 让我们开始创建集群的主节点。 ## 设置主节点 我们首先启动一个安装了 Ubuntu 20.04 AMI(`ami-07cd3e6c4915b2d18`)并加入了我们之前创建的安全组的 `r7iz.metal-16xl` 实例,用于创建主节点。该 AMI 虽然只包含了 Linux v5.15.0,但是幸运的是英特尔和 AWS 已经为这个内核版本打上了 AMX 支持的补丁。因此,我们不需要升级内核至 v5.16。 一旦实例运行起来后,我们 ssh 登录上它并通过 `lscpu` 命令检查 AMX 是否确实已被支持。你应该会在 flags 部分看到如下内容: ``` amx_bf16 amx_tile amx_int8 ``` 然后,我们开始安装本地依赖以及 Python 依赖。 ``` sudo apt-get update # Install tcmalloc for extra performance (https://github.com/google/tcmalloc) sudo apt install libgoogle-perftools-dev -y 
# Create a virtual environment sudo apt-get install python3-pip -y pip install pip --upgrade export PATH=/home/ubuntu/.local/bin:$PATH pip install virtualenv # Activate the virtual environment virtualenv cluster_env source cluster_env/bin/activate # Install PyTorch, IPEX, CCL and Transformers pip3 install torch==1.13.0 -f https://download.pytorch.org/whl/cpu pip3 install intel_extension_for_pytorch==1.13.0 -f https://developer.intel.com/ipex-whl-stable-cpu pip3 install oneccl_bind_pt==1.13 -f https://developer.intel.com/ipex-whl-stable-cpu pip3 install transformers==4.24.0 # Clone the transformers repository for its example scripts git clone https://github.com/huggingface/transformers.git cd transformers git checkout v4.24.0 ``` 接着,我们使用 `ssh-keygen` 创建一个新的 ssh 密钥对,命名为 `cluster`,并保存在缺省位置(`~/.ssh`)。 最后,我们用该实例创建一个 [新的 AMI](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami-ebs.html)。 ## 设置集群 一旦 AMI 准备就绪,我们用它启动另外 3 个 `r7iz.16xlarge-metal` 实例,不要忘了把他们加入之前创建的安全组中。 当这些实例启动的时候,我们 ssh 登录进主节点并完成网络设置。首先,我们编辑位于 `~/.ssh/config` 的 ssh 配置文件,使其支持从主节点到其他节点的免密连接,这里我们只需使用它们各自的私有 IP 及之前创建的密钥对即可。以下是我的配置文件。 ``` Host 172.31.*.* StrictHostKeyChecking no Host node1 HostName 172.31.10.251 User ubuntu IdentityFile ~/.ssh/cluster Host node2 HostName 172.31.10.189 User ubuntu IdentityFile ~/.ssh/cluster Host node3 HostName 172.31.6.15 User ubuntu IdentityFile ~/.ssh/cluster ``` 到此为止,我们可以使用 `ssh node [1-3]` 去免密连接任何节点。 在主节点侧,我们创建一个 `~/hosts` 文件,并填入集群中所有节点的名称,这些名称已在上面的 ssh 配置文件中定义。我们用 `localhost` 代表主节点,因为我们会在该节点启动训练脚本。我的文件如下所示。 ``` localhost node1 node2 node3 ``` 集群现已准备就绪。让我们开始训练吧! ## 启动一个分布式训练任务 在本例中,我们将在 [SQUAD](https://huggingface.co/datasets/squad) 数据集上微调一个用于问答的 [DistilBERT](https://huggingface.co/distilbert-base-uncased) 模型。如果你想试试别的示例的话,尽管去做吧。 ``` source ~/cluster_env/bin/activate cd ~/transformers/examples/pytorch/question-answering pip3 install -r requirements.txt ``` 我们首先冒个烟,启动一个单实例训练任务。请注意如下几个重要的标志变量: * `no_cuda` 确保使用 CPU 进行训练,忽略 GPU * `use_ipex` 使能 IPEX 库,确保 AMX 和 AVX 指令的使用 * `bf16` 使能 BF16 训练 ``` export LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc.so" python run_qa.py --model_name_or_path distilbert-base-uncased \ --dataset_name squad --do_train --do_eval --per_device_train_batch_size 32 \ --num_train_epochs 1 --output_dir /tmp/debug_squad/ \ --use_ipex --bf16 --no_cuda ``` 不必等到任务完成,我们只运行 1 分钟用于确保所有的依赖已被正常安装。同时,这也给了我们一个单实例训练的基线性能:1 个 epoch 花费大约 **26 分钟**。供参考,我们测量了同样的任务在一个相当的 Ice Lake 实例(`c6i.16xlarge`)上的性能,基于相同的软件设置,每个 epoch 需要 **3 小时 30 分钟**。加速比达到 **8 倍**。我们已经能看到新指令带来的好处! 
现在,让我们把训练任务分布式部署到 4 个实例上。一个 `r7iz.16xlarge` 实例有 32 个物理 CPU 核,我们倾向于直接使用物理核而不是虚拟核(vCPUs)(`KMP_HW_SUBSET=1T`)。我们决定分配 24 个核用于训练(`OMP_NUM_THREADS`),2 个核用于集合通信(`CCL_WORKER_COUNT`),剩下的 6 个核给内核和其他进程使用。这 24 个训练线程分配给 2 个 Python 进程使用(`NUM_PROCESSES_PER_NODE`)。因此,一个 4 节点的集群上共有 8(`NUM_PROCESSES`)个 Python 进程。 ``` # Set up environment variables for CCL oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)") source $oneccl_bindings_for_pytorch_path/env/setvars.sh export MASTER_ADDR=172.31.3.190 export NUM_PROCESSES=8 export NUM_PROCESSES_PER_NODE=2 export CCL_WORKER_COUNT=2 export CCL_WORKER_AFFINITY=auto export KMP_HW_SUBSET=1T ``` 现在,我们启动分布式训练任务。 ``` # Launch distributed training mpirun -f ~/hosts \ -n $NUM_PROCESSES -ppn $NUM_PROCESSES_PER_NODE \ -genv OMP_NUM_THREADS=24 \ -genv LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc.so" \ python3 run_qa.py \ --model_name_or_path distilbert-base-uncased \ --dataset_name squad \ --do_train \ --do_eval \ --per_device_train_batch_size 32 \ --num_train_epochs 1 \ --output_dir /tmp/debug_squad/ \ --overwrite_output_dir \ --no_cuda \ --xpu_backend ccl \ --bf16 ``` 现在,一个 epoch 仅需 **7 分 30 秒**。 任务如下图所示。图的上半部分是主节点,同时你也可以看到其他 3 个节点每个均有 2 个训练进程在运行。 <kbd> <img src="../assets/124_intel_sapphire_rapids/02.png"> </kbd> 4 节点的完美线性扩展需要 6 分 30 秒的训练时间(26 分钟除以 4)。我们非常接近于这个理想值,这充分展现了该方法很高的扩展性。 ## 结论 如你所见,在一个英特尔至强集群上训练 Hugging Face transformers 模型是一个灵活,可扩展且性价比高的解决方案,特别是在你的数据集和模型是小尺寸或者中等尺寸情况下。 以下列出了一些其他可帮助你起步的资源: * [Intel IPEX](https://github.com/intel/intel-extension-for-pytorch) GitHub * Hugging Face 文档: "[Efficient training on CPU](https://huggingface.co/docs/transformers/perf_train_cpu)" 及 "[Efficient training on many CPUs](https://huggingface.co/docs/transformers/perf_train_cpu_many)" 如你有任何问题或反馈,请通过 Hugging Face 论坛告诉我们。 感谢阅读!
blog/zh/intel-sapphire-rapids.md/0
{ "file_path": "blog/zh/intel-sapphire-rapids.md", "repo_id": "blog", "token_count": 7344 }
53
--- title: 如何使用 Megatron-LM 训练语言模型 thumbnail: /blog/assets/100_megatron_training/thumbnail.png authors: - user: loubnabnl translators: - user: gxy-gxy - user: zhongdongy proofreader: true --- # 如何使用 Megatron-LM 训练语言模型 在 PyTorch 中训练大语言模型不仅仅是写一个训练循环这么简单。我们通常需要将模型分布在多个设备上,并使用许多优化技术以实现稳定高效的训练。Hugging Face 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 的创建是为了支持跨 GPU 和 TPU 的分布式训练,并使其能够非常容易的集成到训练代码中。🤗 [Transformers](https://huggingface.co/docs/transformers/index) 还支持使用 [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.Trainer) API 来训练,其在 PyTorch 中提供功能完整的训练接口,甚至不需要自己编写训练的代码。 [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) 是研究人员用于预训练大型 Transformer 模型的另一个流行工具,它是 NVIDIA 应用深度学习研究团队开发的一个强大框架。与 `accelerate` 和 `Trainer` 不同,Megatron-LM 使用起来并不简单,对于初学者来说可能难以上手。但它针对 GPU 上的训练进行了高度优化。在这篇文章中,你将学习如何使用 Megatron-LM 框架在 NVIDIA GPU 上训练语言模型,并将其与 `transformers` 结合。 我们将分解在此框架中训练 GPT2 模型的不同步骤,包括: - 环境设置 - 数据预处理 - 训练 - 将模型转化为 🤗 Transformers ## 为什么选择 Megatron-LM? 在进入训练细节的讲解之前,让我们首先了解是什么让这个框架比其他框架更高效。本节的灵感来自这篇关于使用 [Megatron-DeepSpeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed) 进行 BLOOM 训练的精彩 [博客](https://huggingface.co/blog/zh/bloom-megatron-deepspeed),请参阅该博客以获取更多详细信息,因为该博客旨在对 Megatron-LM 进行详细的介绍。 ### 数据加载 Megatron-LM 带有一个高效的 DataLoader,其中数据在训练前被 tokenize 和 shuffle。它还将数据拆分为带有索引的编号序列,并将索引存储,因此 tokenize 只需要计算一次。为了构建索引,首先根据训练参数计算每个 epoch 的数量,并创建一个排序,然后对数据进行 shuffle 操作。这与大多数情况不同,我们通常迭代整个数据集直到其用尽,然后重复第二个 epoch 。这平滑了学习曲线并节省了训练时间。 ### 融合 CUDA 内核 当一个计算在 GPU 上运行时,必要的数据会从内存中取出并加载到 GPU 上,然后计算结果被保存回内存。简单来说,融合内核的思想是: 将通常由 PyTorch 单独执行的类似操作组合成一个单独的硬件操作。因此可以将多个离散计算合并为一个,从而减少在多个离散计算中的内存移动次数。下图说明了内核融合的思想。它的灵感来自这篇 [论文](https://www.arxiv-vanity.com/papers/1305.1183/),该论文详细讨论了这个概念。 <p align="center"> <img src="/blog/assets/100_megatron_training/kernel_fusion.png" width="600" /> </p> 当 f、g 和 h 融合在一个内核中时,f 和 g 的中间结果 x' 和 y' 存储在 GPU 寄存器中并立即被 h 使用。但是如果不融合,x' 和 y' 就需要复制到内存中,然后由 h 加载。因此,融合 CUDA 内核显着加快了计算速度。此外,Megatron-LM 还使用 [Apex](https://github.com/NVIDIA/apex) 的 AdamW 融合实现,它比 PyTorch 实现更快。 虽然我们可以在 `transformers` 中自定义 Megatron-LM 中的 DataLoader 和 Apex 的融合优化器,但自定义融合 CUDA 内核对新手来说太不友好了。 现在你已经熟悉了该框架及其优势,让我们进入训练细节吧! ## 如何使用 Megatron-LM 框架训练? 
### 环境设置 设置环境的最简单方法是从 [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) 拉取附带所有所需环境的 NVIDIA PyTorch 容器。有关详细信息,请参阅 [文档](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html)。如果你不想使用此容器,则需要安装最新的 pytorch、cuda、nccl 和 NVIDIA [APEX](https://github.com/NVIDIA/apex#quick-start) 版本和 `nltk` 库。 在安装完 Docker 之后,你可以使用以下命令运行容器 (`xx.xx` 表示你的 Docker 版本),然后在其中克隆 [Megatron-LM 库](https://github.com/NVIDIA/Megatron-LM): ```bash docker run --gpus all -it --rm nvcr.io/nvidia/pytorch:xx.xx-py3 git clone https://github.com/NVIDIA/Megatron-LM ``` 你还需要在容器的 Megatron-LM 文件夹中添加分词器的词汇文件 `vocab.json` 和合并表 `merges.txt`。这些文件可以在带有权重的模型仓库中找到,请参阅 [GPT2 库](https://huggingface.co/gpt2/tree/main)。你还可以使用 `transformers` 训练自己的分词器。你可以查看 [CodeParrot 项目](https://github.com/huggingface/transformers/tree/main/examples/research_projects/codeparrot) 以获取实际示例。现在,如果你想从容器外部复制这些数据,你可以使用以下命令: ```bash sudo docker cp vocab.json CONTAINER_ID:/workspace/Megatron-LM sudo docker cp merges.txt CONTAINER_ID:/workspace/Megatron-LM ``` ### 数据预处理 在本教程的其余部分,我们将使用 [CodeParrot](https://huggingface.co/codeparrot/codeparrot-small) 模型和数据作为示例。 我们需要对预训练数据进行预处理。首先,你需要将其转换为 json 格式,一个 json 的一行包含一个文本样本。如果你正在使用 🤗 [Datasets](https://huggingface.co/docs/datasets/index),这里有一个关于如何做到这一点的例子 (请在 Megatron-LM 文件夹中进行这些操作): ```python from datasets import load_dataset train_data = load_dataset('codeparrot/codeparrot-clean-train', split='train') train_data.to_json("codeparrot_data.json", lines=True) ``` 然后使用以下命令将数据 tokenize、shuffle 并处理成二进制格式以进行训练: ```bash #if nltk isn't installed pip install nltk python tools/preprocess_data.py \ --input codeparrot_data.json \ --output-prefix codeparrot \ --vocab vocab.json \ --dataset-impl mmap \ --tokenizer-type GPT2BPETokenizer \ --merge-file merges.txt \ --json-keys content \ --workers 32 \ --chunk-size 25 \ --append-eod ``` `workers` 和 `chunk_size` 选项指的是预处理中使用的线程数量和分配给每个线程的数据块大小。`dataset-impl` 指的是索引数据集的实现方式,包括 ['lazy', 'cached', 'mmap']。这将输出 `codeparrot_content_document.idx` 和 `codeparrot_content_document.bin` 两个文件用于训练。 ### 训练 你可以使用如下所示配置模型架构和训练参数,或将其放入你将运行的 bash 脚本中。该命令在 8 个 GPU 上参数为 110M 的 CodeParrot 模型进行预训练。请注意,数据默认按 969:30:1 的比例划分为训练/验证/测试集。 ```bash GPUS_PER_NODE=8 MASTER_ADDR=localhost MASTER_PORT=6001 NNODES=1 NODE_RANK=0 WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES)) DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT" CHECKPOINT_PATH=/workspace/Megatron-LM/experiments/codeparrot-small VOCAB_FILE=vocab.json MERGE_FILE=merges.txt DATA_PATH=codeparrot_content_document GPT_ARGS="--num-layers 12 --hidden-size 768 --num-attention-heads 12 --seq-length 1024 --max-position-embeddings 1024 --micro-batch-size 12 --global-batch-size 192 --lr 0.0005 --train-iters 150000 --lr-decay-iters 150000 --lr-decay-style cosine --lr-warmup-iters 2000 --weight-decay .1 --adam-beta2 .999 --fp16 --log-interval 10 --save-interval 2000 --eval-interval 200 --eval-iters 10 " TENSORBOARD_ARGS="--tensorboard-dir experiments/tensorboard" python3 -m torch.distributed.launch $DISTRIBUTED_ARGS \ pretrain_gpt.py \ --tensor-model-parallel-size 1 \ --pipeline-model-parallel-size 1 \ $GPT_ARGS \ --vocab-file $VOCAB_FILE \ --merge-file $MERGE_FILE \ --save $CHECKPOINT_PATH \ --load $CHECKPOINT_PATH \ --data-path $DATA_PATH \ $TENSORBOARD_ARGS ``` 使用以上设置,训练大约需要 12 个小时。 该设置使用数据并行,但也可以对无法放在单个 GPU 的超大模型使用模型并行。第一种选择是设置张量并行,它将模型中的张量拆分到多个 GPU 上并行运算,你需要将 `tensor-model-parallel-size` 参数更改为所需的 GPU 数量。第二种选择是流水线并行,它将模型按层分成大小相等的几块。参数 
`pipeline-model-parallel-size` 表示将模型分成的块数。有关详细信息,请参阅此 [博客](https://huggingface.co/blog/zh/bloom-megatron-deepspeed) ### 将模型转换为 🤗 Transformers 训练结束后,我们希望使用 `transformers` 库对该模型进行评估或将其部署到生产环境中。你可以按照 [教程](https://huggingface.co/nvidia/megatron-gpt2-345m) 将其转换为 `transformers` 模型。例如,在训练完成后,你可以复制第 150k 次迭代的权重,并使用以下命令将文件 `model_optim_rng.pt` 转换为 `transformers` 支持的 `pytorch_model.bin` 文件: ```bash # to execute outside the container: mkdir -p nvidia/megatron-codeparrot-small # copy the weights from the container sudo docker cp CONTAINER_ID:/workspace/Megatron-LM/experiments/codeparrot-small/iter_0150000/mp_rank_00/model_optim_rng.pt nvidia/megatron-codeparrot-small git clone https://github.com/huggingface/transformers.git git clone https://github.com/NVIDIA/Megatron-LM.git export PYTHONPATH=Megatron-LM python transformers/src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py nvidia/megatron-codeparrot-small/model_optim_rng.pt ``` 请注意,如果你打算使用原始的分词器,你将需要在转换后将生成的词汇文件和合并表替换为我们之前介绍的原始文件。 不要忘记将你的模型推送到 hub 并与社区分享,只需三行代码 🤗: ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("nvidia/megatron-codeparrot-small") # this creates a repository under your username with the model name codeparrot-small model.push_to_hub("codeparrot-small") ``` 你还可以轻松地使用它来生成文本: ```python from transformers import pipeline pipe = pipeline("text-generation", model="your_username/codeparrot-small") outputs = pipe("def hello_world():") print(outputs[0]["generated_text"]) ``` ``` def hello_world(): print("Hello World!") ``` Transformers 还可以有效地处理大模型推理。如果你训练了一个非常大的模型 (例如训练时使用了模型并行),你可以通过以下命令轻松地进行推理: ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("your_username/codeparrot-large", device_map="auto") ``` 这将在内部调用 [accelerate 库](https://huggingface.co/docs/accelerate/index) 自动在你可用的设备 (GPU、CPU RAM) 之间分配模型权重。 免责声明: 我们已经证明任何人都可以使用 Megatron-LM 来训练语言模型。问题是我们需要考虑什么时候使用它。由于额外的预处理和转换步骤,这个框架显然增加了一些时间开销。因此,重要的是你要考虑哪个框架更适合你的需求和模型大小。我们建议将其用于预训练模型或微调,但可能不适用于中型模型的微调。 `APITrainer` 和 `accelerate` 库对于模型训练同样也非常方便,并且它们与设备无关,为用户提供了极大的灵活性。 恭喜 🎉 现在你学会了如何在 Megatron-LM 框架中训练 GPT2 模型并使其支持 `transformers`!
blog/zh/megatron-training.md/0
{ "file_path": "blog/zh/megatron-training.md", "repo_id": "blog", "token_count": 6817 }
54
--- title: "Optimum + ONNX Runtime: 更容易、更快地训练你的 Hugging Face 模型" thumbnail: /blog/assets/optimum_onnxruntime-training/thumbnail.png authors: - user: Jingya - user: kshama-msft guest: true - user: askhade guest: true - user: weicwang guest: true - user: zhijiang guest: true translators: - user: AIBoy1993 --- # Optimum + ONNX Runtime: 更容易、更快地训练你的 Hugging Face 模型 ## 介绍 基于语言、视觉和语音的 Transformer 模型越来越大,以支持终端用户复杂的多模态用例。增加模型大小直接影响训练这些模型所需的资源,并随着模型大小的增加而扩展它们。Hugging Face 和微软的 ONNX Runtime 团队正在一起努力,在微调大型语言、语音和视觉模型方面取得进步。Hugging Face 的 [🤗 Optimum 库](https://huggingface.co/docs/optimum/index),通过和 ONNX Runtime 的集成进行训练,为许多流行的 Hugging Face 模型提供了一个开放的解决方案,可以将**训练时间缩短 35% 或更多**。我们展现了 Hugging Face Optimum 和 ONNX Runtime Training 生态系统的细节,性能数据突出了使用 Optimum 库的好处。 ## 性能测试结果 下面的图表表明,当**使用 ONNX Runtime 和 DeepSpeed ZeRO Stage 1**进行训练时,用 Optimum 的 Hugging Face 模型的加速**从 39% 提高到 130%**。性能测试的基准运行是在选定的 Hugging Face PyTorch 模型上进行的,第二次运行是只用 ONNX Runtime 训练,最后一次运行是 ONNX Runtime + DeepSpeed ZeRO Stage 1,图中显示了最大的收益。基线 PyTorch 运行所用的优化器是 AdamW Optimizer,ORT 训练用的优化器是 Fused Adam Optimizer。这些运行是在带有 8 个 GPU 的单个 NVIDIA A100 节点上执行的。 <figure class="image table text-center m-0 w-full"> <img src="../assets/optimum_onnxruntime-training/onnxruntime-training-benchmark.png" alt="Optimum-onnxruntime Training Benchmark"/> </figure> 更多关于开启 🤗 Optimum 进行训练加速的配置细节可以在[指南](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/trainer)中找到。用于这些运行的版本信息如下: ``` PyTorch: 1.14.0.dev20221103+cu116; ORT: 1.14.0.dev20221103001+cu116; DeepSpeed: 0.6.6; HuggingFace: 4.24.0.dev0; Optimum: 1.4.1.dev0; Cuda: 11.6.2 ``` ## Optimum 库 Hugging Face 是一个快速发展的开放社区和平台,旨在将优秀的机器学习大众化。随着 [🤗 Transformers 库](https://huggingface.co/docs/transformers/index) 的成功,我们将模态从 NLP 扩展到音频和视觉,现在涵盖了跨机器学习的用例,以满足我们社区的需求。现在在 [Hugging Face Hub](https://huggingface.co/models) 上,有超过 12 万个免费和可访问的模型 checkpoints 用于各种机器学习任务,1.8 万个数据集和 2 万个机器学习演示应用。然而,将 Transformer 模型扩展到生产中仍然是工业界的一个挑战。尽管准确性很高,但基于 Transformer 的模型的训练和推理可能耗时且昂贵。 为了满足这些需求,Hugging Face 构建了两个开源库: **🤗 Accelerate** 和 **🤗 Optimum**。[🤗 Accelerate](https://huggingface.co/docs/accelerate/index) 专注于开箱即用的分布式训练,而 [🤗 Optimum](https://huggingface.co/docs/optimum/index) 作为 Transformer 的扩展,通过利用用户目标硬件的最大效率来加速模型训练和推理。Optimum 集成了机器学习加速器如 ONNX Runtime,和专业的硬件如英特尔的 [Habana Gaudi](https://huggingface.co/blog/habana-gaudi-2-benchmark),因此用户可以从训练和推理的显著加速中受益。此外,🤗 Optimum 无缝集成了其他 Hugging Face 的工具,同时继承了 Transformer 的易用性。开发人员可以轻松地调整他们的工作,以更少的计算能力实现更低的延迟。 ## ONNX Runtime 训练 [ONNX Runtime](https://onnxruntime.ai/) 加速[大型模型训练](https://onnxruntime.ai/docs/get-started/training-pytorch.html),单独使用时将吞吐量提高40%,与 [DeepSpeed](https://www.deepspeed.ai/tutorials/zero/) 组合后将吞吐量提高130%,用于流行的基于 Hugging Face Transformer 的模型。ONNX Runtime 已经集成为 🤗 Optimum 的一部分,并通过 Hugging Face 的 🤗 Optimum 训练框架实现更快的训练。 ONNX Runtime Training 通过一些内存和计算优化实现了这样的吞吐量改进。内存优化使 ONNX Runtime 能够最大化批大小并有效利用可用的内存,而计算优化则加快了训练时间。这些优化包括但不限于,高效的内存规划,内核优化,适用于 Adam 优化器的多张量应用 (将应用于所有模型参数的按元素更新分批到一个或几个内核启动中),FP16 优化器 (消除了大量用于主机内存拷贝的设备),混合精度训练和图优化,如节点融合和节点消除。ONNX Runtime Training 支持 [NVIDIA](https://techcommunity.microsoft.com/t5/ai-machine-learning-blog/accelerate-pytorch-transformer-model-training-with-onnx-runtime/ba-p/2540471) 和 [AMD GPU](https://cloudblogs.microsoft.com/opensource/2021/07/13/onnx-runtime-release-1-8-1-previews-support-for-accelerated-training-on-amd-gpus-with-the-amd-rocm-open-software-platform/),并提供自定义操作的可扩展性。 简而言之,它使 AI 开发人员能够充分利用他们熟悉的生态系统,如 PyTorch 和 Hugging Face,并在他们选择的目标设备上使用 ONNX Runtime 进行加速,以节省时间和资源。 ## Optimum 中的 ONNX Runtime Training Optimum 提供了一个 `ORTTrainer` API,它扩展了 Transformer 中的 
`Trainer`,以使用 ONNX Runtime 作为后端进行加速。`ORTTrainer` 是一个易于使用的 API,包含完整的训练循环和评估循环。它支持像超参数搜索、混合精度训练和多 GPU 分布式训练等功能。`ORTTrainer` 使 AI 开发人员在训练 Transformer 模型时能够组合 ONNX Runtime 和其他第三方加速技术,这有助于进一步加速训练,并充分发挥硬件的作用。例如,开发人员可以将 ONNX Runtime Training 与 Transformer 训练器中集成的分布式数据并行和混合精度训练相结合。此外,`ORTTrainer` 使你可以轻松地将 DeepSpeed ZeRO-1 和 ONNX Runtime Training 组合,通过对优化器状态进行分区来节省内存。在完成预训练或微调后,开发人员可以保存已训练的 PyTorch 模型,或使用 🤗 Optimum 实现的 API 将其转为 ONNX 格式,以简化推理的部署。和 `Trainer` 一样,`ORTTrainer` 与 Hugging Face Hub 完全集成: 训练结束后,用户可以将他们的模型 checkpoints 上传到 Hugging Face Hub 账户。 因此具体来说,用户应该如何利用 ONNX Runtime 加速进行训练?如果你已经在使用 `Trainer`,你只需要修改几行代码就可以从上面提到的所有改进中受益。主要有两个替换需要应用。首先,将 `Trainer` 替换为 `ORTTrainer``,然后将 `TrainingArguments` 替换为 `ORTTrainingArguments`,其中包含训练器将用于训练和评估的所有超参数。`ORTTrainingArguments` 扩展了 `TrainingArguments`,以应用 ONNX Runtime 授权的一些额外参数。例如,用户可以使用 Fused Adam 优化器来获得额外的性能收益。下面是一个例子: ```diff -from transformers import Trainer, TrainingArguments +from optimum.onnxruntime import ORTTrainer, ORTTrainingArguments # Step 1: Define training arguments -training_args = TrainingArguments( +training_args = ORTTrainingArguments( output_dir="path/to/save/folder/", - optim = "adamw_hf", + optim = "adamw_ort_fused", ... ) # Step 2: Create your ONNX Runtime Trainer -trainer = Trainer( +trainer = ORTTrainer( model=model, args=training_args, train_dataset=train_dataset, + feature="sequence-classification", ... ) # Step 3: Use ONNX Runtime for training!🤗 trainer.train() ``` ## 展望未来 Hugging Face 团队正在开源更多的大型模型,并通过训练和推理的加速工具以降低用户从模型中获益的门槛。我们正在与 ONNX Runtime Training 团队合作,为更新和更大的模型架构带来更多的训练优化,包括 Whisper 和 Stable Diffusion。微软还将其最先进的训练加速技术打包在 [PyTorch 的 Azure 容器](https://techcommunity.microsoft.com/t5/ai-machine-learning-blog/enabling-deep-learning-with-azure-container-for-pytorch-in-azure/ba-p/3650489)中。这是一个轻量级的精心营造的环境,包括 DeepSpeed 和 ONNX Runtime,以提高 AI 开发者使用 PyTorch 训练的生产力。除了大型模型训练外,ONNX Runtime Training 团队还在为边缘学习构建新的解决方案——在内存和电源受限的设备上进行训练。 ## 准备开始 我们邀请你查看下面的链接,以了解更多关于 Hugging Face 模型的 Optimum ONNX Runtime Training,并开始使用。 * [Optimum ONNX Runtime Training 文档](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/trainer) * [Optimum ONNX Runtime Training 示例](https://github.com/huggingface/optimum/tree/main/examples/onnxruntime/training) * [Optimum Github 仓库](https://github.com/huggingface/optimum/tree/main) * [ONNX Runtime Training 示例](https://github.com/microsoft/onnxruntime-training-examples/) * [ONNX Runtime Training Github 仓库](https://github.com/microsoft/onnxruntime/tree/main/orttraining) * [ONNX Runtime](https://onnxruntime.ai/) * [DeepSpeed](https://www.deepspeed.ai/) 和 [ZeRO](https://www.deepspeed.ai/tutorials/zero/) 教程 * [PyTorch 的 Azure 容器](https://techcommunity.microsoft.com/t5/ai-machine-learning-blog/enabling-deep-learning-with-azure-container-for-pytorch-in-azure/ba-p/3650489) 🏎感谢阅读!如果你有任何问题,请通过 [Github](https://github.com/huggingface/optimum/issues) 或[论坛](https://discuss.huggingface.co/c/optimum/)随时联系我们。你也可以在 [Twitter](https://twitter.com/Jhuaplin) 或 [LinkedIn](https://www.linkedin.com/in/jingya-huang-96158b15b/) 上联系我。
blog/zh/optimum-onnxruntime-training.md/0
{ "file_path": "blog/zh/optimum-onnxruntime-training.md", "repo_id": "blog", "token_count": 5597 }
55
--- title: "Ryght 在 Hugging Face 专家助力下赋能医疗保健和生命科学之旅" thumbnail: /blog/assets/ryght-case-study/thumbnail.png authors: - user: andrewrreed - user: johnnybio guest: true org: RyghtAI translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # Ryght 在 Hugging Face 专家助力下赋能医疗保健和生命科学之旅 > [!NOTE] 本文是 Ryght 团队的客座博文。 ## Ryght 是何方神圣? Ryght 的使命是构建一个专为医疗保健和生命科学领域量身定制的企业级生成式人工智能平台。最近,公司正式公开了 [Ryght 预览版](https://www.ryght.ai/signup?utm_campaign=Preview%20Launch%20April%2016%2C%2024&utm_source=Huggging%20Face%20Blog%20-%20Preview%20Launch%20Sign%20Up) 平台。 当前,生命科学公司不断地从各种不同来源 (实验室数据、电子病历、基因组学、保险索赔、药学、临床等) 收集大量数据,并期望从中获取洞见。但他们分析这些数据的方法已经跟不上数据本身,目前典型的工作模式往往需要一个大型团队来完成从简单查询到开发有用的机器学习模型的所有工作。这一模式已无法满足药物开发、临床试验以及商业活动对可操作知识的巨大需求,更别谈精准医学的兴起所带来的更大的需求了。 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/ryght-case-study/click-through.gif" alt="Ryght Laptop" style="width: 90%; height: auto;"><br> </p> [Ryght](https://hubs.li/Q02sLGKL0) 的目标是让生命科学专业人士能够快速、安全地从数据中挖掘出他们所需的洞见。为此,其正在构建一个 SaaS 平台,为本专业的人员和组织提供定制的 AI copilot 解决方案,以助力他们对各种复杂数据源进行记录、分析及研究。 Ryght 认识到 AI 领域节奏快速且多变的特点,因此一开始就加入 [Hugging Face 专家支持计划](https://huggingface.co/support),将 Hugging Face 作为技术咨询合作伙伴。 ## 共同克服挑战 > ##### _我们与 Hugging Face 专家支持计划的合作对加快我们生成式人工智能平台的开发起到了至关重要的作用。快速发展的人工智能领域有可能彻底改变我们的行业,而 Hugging Face 的高性能、企业级的文本生成推理 (TGI) 和文本嵌入推理 (TEI) 服务本身就是游戏规则的改写者。 - [Johnny Crupi](https://www.linkedin.com/in/johncrupi/),[Ryght 首席技术官](http://www.ryght.ai/?utm_campaign=hf&utm_source=hf_blog)_ 在着手构建生成式人工智能平台的过程中,Ryght 面临着多重挑战。 ### 1. 快速提升团队技能并在多变的环境中随时了解最新情况 随着人工智能和机器学习技术的快速发展,确保团队及时了解最新的技术、工具以及最佳实践至关重要。这一领域的学习曲线呈现出持续陡峭的特点,因此需要齐心协力才能及时跟上。 与 Hugging Face 的人工智能生态系统核心专家团队的合作,有助于 Ryght 跟上本垂直领域的最新发展以及最新模型。通过开放异步的沟通渠道、定期的咨询会以及专题技术研讨会等多种形式,充分地保证了目的的实现。 ### 2. 在众多方案中找到最 [经济] 的机器学习方案 人工智能领域充满了创新,催生了大量的工具、库、模型及方法。对于像 Ryght 这样的初创公司来说,必须消除这种噪声并确定哪些机器学习策略最适合生命科学这一独特场景。这不仅需要了解当前的技术水平,还需要对技术在未来的相关性和可扩展性有深刻的洞见。 Hugging Face 作为 Ryght 技术团队的合作伙伴,在解决方案设计、概念验证开发和生产工作负载优化全过程中提供了有力的协助,包括: 针对应用场景推荐最适合 Ryght 需求的库、框架和模型,并提供了如何使用这些软件和模型的示例。这些指导最终简化了决策过程并缩短了开发时间。 ### 3. 开发专注于安全性、隐私性及灵活性的高性能解决方案 鉴于其目标是企业级的解决方案,因此 Ryght 把安全、隐私和可治理性放在最重要的位置。因此在设计方案架构时,需要提供支持各种大语言模型 (LLM) 的灵活性,这是生命科学领域内容生成和查询处理系统的关键诉求。 基于对开源社区的快速创新,特别是医学 LLM 创新的理解,其最终采用了“即插即用”的 LLM 架构。这种设计使其能够在新 LLM 出现时能无缝地评估并集成它们。 在 Ryght 的平台中,每个 LLM 均可注册并链接至一个或多个特定于客户的推理端点。这种设计不仅可以保护各客户的连接,还提供了在不同 LLM 之间切换的能力,提供了很好的灵活性。Ryght 通过采用 Hugging Face 的 [文本生成推理 (TGI)](https://huggingface.co/docs/text-generation-inference/index) 和 [推理端点](https://huggingface.co/inference-endpoints/dedicate) 实现了该设计。 除了 TGI 之外,Ryght 还将 [文本嵌入推理 (TEI)](https://huggingface.co/docs/text-embeddings-inference/en/index) 集成到其 ML 平台中。使用 TEI 和开源嵌入模型提供服务,与仅依赖私有嵌入服务相比,可以使 Ryght 能够享受更快的推理速度、免去对速率限制的担忧,并得到可以为自己的微调模型提供服务的灵活性,而微调模型可以更好地满足生命科学领域的独特要求。 为了同时满足多个客户的需求,系统需要能处理大量并发请求,同时保持低延迟。因此,Ryght 的嵌入和推理服务不仅仅是简单的模型调用,还需要支持包括组批、排队和跨 GPU 分布式模型处理等高级特性。这些特性对于避免性能瓶颈并确保用户不会遇到延迟,从而保持最佳的系统响应时间至关重要。 ## 总结 Ryght 与 Hugging Face 在 ML 服务上的战略合作伙伴关系以及深度集成凸显了其致力于在医疗保健和生命科学领域提供尖端解决方案的承诺。通过采用灵活、安全和可扩展的架构,其确保自己的平台始终处于创新前沿,为客户提供无与伦比的服务和专业知识,以应对现代医疗领域的复杂性。 [Ryght 预览版](https://hubs.li/Q02sLFl_0) 现已作为一个可轻松上手的、免费、安全的平台向生命科学知识工作者公开,欢迎大家使用。Ryght 的 copilot 库包含各种工具,可加速信息检索、复杂非结构化数据的综合及结构化,以及文档构建等任务,把之前需要数周才能完成的工作缩短至数天或数小时。如你对定制方案及合作方案有兴趣,请联系其 [AI 专家团队](https://hubs.li/Q02sLG9V0),以讨论企业级 Ryght 服务。 如果你有兴趣了解有关 Hugging Face 专家支持计划的更多信息,请 [通过此处](https://huggingface.co/contact/sales?from=support) 联系我们,我们将联系你讨论你的需求!
blog/zh/ryght-case-study.md/0
{ "file_path": "blog/zh/ryght-case-study.md", "repo_id": "blog", "token_count": 4697 }
56
--- title: "在 SDXL 上用 T2I-Adapter 实现高效可控的文生图" thumbnail: /blog/assets/t2i-sdxl-adapters/thumbnail.png authors: - user: Adapter guest: true - user: valhalla - user: sayakpaul - user: Xintao guest: true - user: hysts translators: - user: MatrixYao - user: zhongdongy proofreader: true --- # 在 SDXL 上用 T2I-Adapter 实现高效可控的文生图 <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/hf_tencent.png" height=180/> </p> [T2I-Adapter](https://huggingface.co/papers/2302.08453) 是一种高效的即插即用模型,其能对冻结的预训练大型文生图模型提供额外引导。T2I-Adapter 将 T2I 模型中的内部知识与外部控制信号结合起来。我们可以根据不同的情况训练各种适配器,实现丰富的控制和编辑效果。 同期的 [ControlNet](https://hf.co/papers/2302.05543) 也有类似的功能且已有广泛的应用。然而,其运行所需的 **计算成本比较高**。这是因为其反向扩散过程的每个去噪步都需要运行 ControlNet 和 UNet。另外,对 ControlNet 而言,复制 UNet 编码器作为控制模型的一部分对效果非常重要,这也导致了控制模型参数量的进一步增大。因此,ControlNet 的模型大小成了生成速度的瓶颈 (模型越大,生成得越慢)。 在这方面,T2I-Adapters 相较 ControlNets 而言颇有优势。T2I-Adapter 的尺寸较小,而且,与 ControlNet 不同,T2I-Adapter 可以在整个去噪过程中仅运行一次。 | **模型** | **参数量** | **所需存储空间(fp16)** | | --- | --- | --- | | [ControlNet-SDXL](https://huggingface.co/diffusers/controlnet-canny-sdxl-1.0) | 1251 M | 2.5 GB | | [ControlLoRA](https://huggingface.co/stabilityai/control-lora) (rank = 128) | 197.78 M (参数量减少 84.19%) | 396 MB (所需空间减少 84.53%) | | [T2I-Adapter-SDXL](https://huggingface.co/TencentARC/t2i-adapter-canny-sdxl-1.0) | 79 M (**_参数量减少 93.69%_**) | 158 MB (**_所需空间减少 94%_**) | 在过去的几周里,Diffusers 团队和 T2I-Adapter 作者紧密合作,在 [`diffusers`](https://github.com/huggingface/diffusers) 库上为 [Stable Diffusion XL (SDXL)](https://huggingface.co/papers/2307.01952) 增加 T2I-Adapter 的支持。本文,我们将分享我们在从头开始训练基于 SDXL 的 T2I-Adapter 过程中的发现、漂亮的结果,以及各种条件 (草图、canny、线稿图、深度图以及 OpenPose 骨骼图) 下的 T2I-Adapter checkpoint! ![结果合辑](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/results_collage.png) 与之前版本的 T2I-Adapter (SD-1.4/1.5) 相比,[T2I-Adapter-SDXL](https://github.com/TencentARC/T2I-Adapter) 还是原来的配方,不一样之处在于,用一个 79M 的适配器去驱动 2.6B 的大模型 SDXL! T2I-Adapter-SDXL 在继承 SDXL 的高品质生成能力的同时,保留了强大的控制能力! ## 用 `diffusers` 训练 T2I-Adapter-SDXL 我们基于 `diffusers` 提供的 [这个官方示例](https://github.com/huggingface/diffusers/blob/main/examples/t2i_adapter/README_sdxl.md) 构建了我们的训练脚本。 本文中提到的大多数 T2I-Adapter 模型都是在 LAION-Aesthetics V2 的 3M 高分辨率 `图文对` 上训练的,配置如下: - 训练步数: 20000-35000 - batch size: 采用数据并行,单 GPU batch size 为 16,总 batch size 为 128 - 学习率: 1e-5 的恒定学习率 - 混合精度: fp16 我们鼓励社区使用我们的脚本来训练自己的强大的 T2I-Adapter,并对速度、内存和生成的图像质量进行折衷以获得竞争优势。 ## 在 `diffusers` 中使用 T2I-Adapter-SDXL 这里以线稿图为控制条件来演示 [T2I-Adapter-SDXL](https://github.com/TencentARC/T2I-Adapter/tree/XL) 的使用。首先,安装所需的依赖项: ```bash pip install -U git+https://github.com/huggingface/diffusers.git pip install -U controlnet_aux==0.0.7 # for conditioning models and detectors pip install transformers accelerate ``` T2I-Adapter-SDXL 的生成过程主要包含以下两个步骤: 1. 首先将条件图像转换为符合要求的 _控制图像_ 格式。 2. 
然后将 _控制图像_ 和 _提示_ 传给 [`StableDiffusionXLAdapterPipeline`](https://github.com/huggingface/diffusers/blob/0ec7a02b6a609a31b442cdf18962d7238c5be25d/src/diffusers/pipelines/t2i_adapter/pipeline_stable_diffusion_xl_adapter.py#L126)。 我们看一个使用 [Lineart Adapter](https://huggingface.co/TencentARC/t2i-adapter-lineart-sdxl-1.0) 的简单示例。我们首先初始化 SDXL 的 T2I-Adapter 流水线以及线稿检测器。 ```python import torch from controlnet_aux.lineart import LineartDetector from diffusers import (AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLAdapterPipeline, T2IAdapter) from diffusers.utils import load_image, make_image_grid # load adapter adapter = T2IAdapter.from_pretrained( "TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16, varient="fp16" ).to("cuda") # load pipeline model_id = "stabilityai/stable-diffusion-xl-base-1.0" euler_a = EulerAncestralDiscreteScheduler.from_pretrained( model_id, subfolder="scheduler" ) vae = AutoencoderKL.from_pretrained( "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 ) pipe = StableDiffusionXLAdapterPipeline.from_pretrained( model_id, vae=vae, adapter=adapter, scheduler=euler_a, torch_dtype=torch.float16, variant="fp16", ).to("cuda") # load lineart detector line_detector = LineartDetector.from_pretrained("lllyasviel/Annotators").to("cuda") ``` 然后,加载图像并生成其线稿图: ```python url = "https://huggingface.co/Adapter/t2iadapter/resolve/main/figs_SDXLV1.0/org_lin.jpg" image = load_image(url) image = line_detector(image, detect_resolution=384, image_resolution=1024) ``` ![龙的线稿图](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/lineart_dragon.png) 然后生成: ```python prompt = "Ice dragon roar, 4k photo" negative_prompt = "anime, cartoon, graphic, text, painting, crayon, graphite, abstract, glitch, deformed, mutated, ugly, disfigured" gen_images = pipe( prompt=prompt, negative_prompt=negative_prompt, image=image, num_inference_steps=30, adapter_conditioning_scale=0.8, guidance_scale=7.5, ).images[0] gen_images.save("out_lin.png") ``` ![用线稿图生成出来的龙](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/lineart_generated_dragon.png) 理解下述两个重要的参数,可以帮助你调节控制程度。 1. `adapter_conditioning_scale` 该参数调节控制图像对输入的影响程度。越大代表控制越强,反之亦然。 2. 
`adapter_conditioning_factor` 该参数调节适配器需应用于生成过程总步数的前面多少步,取值范围在 0-1 之间 (默认值为 1)。 `adapter_conditioning_factor=1` 表示适配器需应用于所有步,而 `adapter_conditioning_factor=0.5` 则表示它仅应用于前 50% 步。 更多详情,请查看 [官方文档](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/adapter)。 ## 试玩演示应用 你可以在 [这儿](https://huggingface.co/spaces/TencentARC/T2I-Adapter-SDXL) 或下述嵌入的游乐场中轻松试玩 T2I-Adapter-SDXL: <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.43.1/gradio.js"></script> <gradio-app src="https://tencentarc-t2i-adapter-sdxl.hf.space"></gradio-app> 你还可以试试 [Doodly](https://huggingface.co/spaces/TencentARC/T2I-Adapter-SDXL-Sketch),它用的是草图版模型,可以在文本监督的配合下,把你的涂鸦变成逼真的图像: <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.43.1/gradio.js"></script> <gradio-app src="https://tencentarc-t2i-adapter-sdxl-sketch.hf.space"></gradio-app> ## 更多结果 下面,我们展示了使用不同控制图像作为条件获得的结果。除此以外,我们还分享了相应的预训练 checkpoint 的链接。如果想知道有关如何训练这些模型的更多详细信息及其示例用法,可以参考各自模型的模型卡。 ### 使用线稿图引导图像生成 ![线稿图的更多结果](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/lineart_guided.png) _模型见 [`TencentARC/t2i-adapter-lineart-sdxl-1.0`](https://huggingface.co/TencentARC/t2i-adapter-lineart-sdxl-1.0)_ ### 使用草图引导图像生成 ![草图的结果](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/sketch_guided.png) _模型见 [`TencentARC/t2i-adapter-sketch-sdxl-1.0`](https://huggingface.co/TencentARC/t2i-adapter-sketch-sdxl-1.0)_ ### 使用 Canny 检测器检测出的边缘图引导图像生成 ![Canny 边缘图的结果](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/canny_guided.png) _模型见 [`TencentARC/t2i-adapter-canny-sdxl-1.0`](https://huggingface.co/TencentARC/t2i-adapter-canny-sdxl-1.0)_ ### 使用深度图引导图像生成 ![深度图的结果](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/depth_guided.png) _模型分别见 [`TencentARC/t2i-adapter-depth-midas-sdxl-1.0`](https://huggingface.co/TencentARC/t2i-adapter-depth-midas-sdxl-1.0) 及 [`TencentARC/t2i-adapter-depth-zoe-sdxl-1.0`](https://huggingface.co/TencentARC/t2i-adapter-depth-zoe-sdxl-1.0)_ ### 使用 OpenPose 骨骼图引导图像生成 ![OpenPose 骨骼图的结果](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/t2i-adapters-sdxl/pose_guided.png) _模型见 [`TencentARC/t2i-adapter-openpose-sdxl-1.0`](https://hf.co/TencentARC/t2i-adapter-openpose-sdxl-1.0)_ --- _致谢: 非常感谢 [William Berman](https://twitter.com/williamLberman) 帮助我们训练模型并分享他的见解。_
blog/zh/t2i-sdxl-adapters.md/0
{ "file_path": "blog/zh/t2i-sdxl-adapters.md", "repo_id": "blog", "token_count": 5513 }
57
--- title: "Kakao Brain 的开源 ViT、ALIGN 和 COYO 文字" thumbnail: /blog//assets/132_vit_align/thumbnail.png authors: - user: adirik - user: Unso - user: dylan-m - user: jun-untitled translators: - user: conyzhang --- # Kakao Brain 的开源 ViT、ALIGN 和 COYO 文字 最近 Kakao Brain 在 Hugging Face 发布了一个全新的开源图像文本数据集 [COYO](https://github.com/kakaobrain/coyo-dataset),包含 7 亿对图像和文本,并训练了两个新的视觉语言模型 [ViT](https://github.com/kakaobrain/coyo-vit) 和 [ALIGN](https://github.com/kakaobrain/coyo-align)。 这是 ALIGN 模型首次公开发布供开源使用,同时 ViT 和 ALIGN 模型的发布都附带有训练数据集。 Google 的 [ViT](https://ai.googleblog.com/2020/12/transformers-for-image-recognition-at.html) 和 [ALIGN](https://ai.googleblog.com/2021/05/align-scaling-up-visual-and-vision.html) 模型都使用了巨大的数据集 (ViT 训练于 3 亿张图像,ALIGN 训练于 18 亿个图像 - 文本对) 进行训练,因为数据集不公开导致无法复现。[Kakao Brain](https://hf.co/kakaobrain) 的 ViT 和 ALIGN 模型采用与 Google 原始模型相同的架构和超参数,不同的是其在开源 [COYO 数据集](https://github.com/kakaobrain/coyo-dataset) 上进行训练。对于想要拥有数据并复现视觉语言模型的研究人员有很大的价值。 这篇博客将介绍新的 [COYO](https://github.com/kakaobrain/coyo-dataset) 数据集、Kakao Brain 的 ViT 和 ALIGN 模型,以及如何使用它们!以下是主要要点: * 第一个开源的 ALIGN 模型! * 第一个在开源数据集 [COYO](https://github.com/kakaobrain/coyo-dataset) 上训练的开源 ViT 和 ALIGN 模型。 * Kakao Brain 的 ViT 和 ALIGN 模型表现与 Google 版本相当。 * ViT 模型在 HF 上可演示!您可以使用自己的图像样本在线体验 ViT! ## 性能比较 Kakao Brain 发布的 ViT 和 ALIGN 模型与 Google 的模型表现相当,某些方面甚至更好。Kakao Brain 的 `ALIGN-B7-Base` 模型虽然训练的数据对少得多 ( 7 亿 VS 1.8 亿),但在图像 KNN 分类任务上表现与 Google 的 `ALIGN-B7-Base` 相当,在 MS-COCO 图像 - 文本检索、文本 - 图像检索任务上表现更好。Kakao Brain 的 `ViT-L/16` 在 384×512 的 ImageNet 和 ImageNet-ReaL 数据上的表现与 Google 的 `ViT-L/16` 相当。这意味着同行可以使用 Kakao Brain 的 ViT 和 ALIGN 模型来复现 Google 的 ViT 和 ALIGN ,尤其是当用户需要训练数据时。所以我们很高兴开源这些与现有技术相当的模型! <p> <center> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/132_vit_align/vit-align-performance.png" alt="ViT and ALIGN performance"/> </center> </p> ## COYO 数据集 <p> <center> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/132_vit_align/coyo-samples.png" alt="COYO samples"/> </center> </p> 本次发布的模型特别之处在于都是基于开源的 COYO 数据集训练的。[COYO](https://github.com/kakaobrain/coyo-dataset#dataset-preview) 数据集包含 7 亿图像 - 文本对,类似于 Google 的 ALIGN 1.8B 图像 - 文本数据集,是从网页上收集的“嘈杂”的 html 文本 (alt-text) 和图像对。COYO-700M 和 ALIGN 1.8B都是“嘈杂”的,只使用了适当的清洗处理。COYO 类似于另一个开源的图像–文本数据集 `LAION`,但有一些区别。尽管 `LAION` 2B 是一个更大的数据集,包含 20 亿个英语配对,但 `COYO` 的附带有更多元数据,为用户提供更多灵活性和更细粒度的使用。以下表格显示了它们之间的区别: `COYO` 所有数据对都提供了美感评分,更健壮的水印评分和面部计数信息 (face count data)。 | COYO | LAION 2B| ALIGN 1.8B | | :----: | :----: | :----: | | Image-text similarity score calculated with CLIP ViT-B/32 and ViT-L/14 models, they are provided as metadata but nothing is filtered out so as to avoid possible elimination bias | Image-text similarity score provided with CLIP (ViT-B/32) - only examples above threshold 0.28 | Minimal, Frequency based filtering | | NSFW filtering on images and text | NSFW filtering on images | [Google Cloud API](https://cloud.google.com/vision) | | Face recognition (face count) data provided as meta-data | No face recognition data | NA | | 700 million pairs all English | 2 billion English| 1.8 billion | | From CC 2020 Oct - 2021 Aug| From CC 2014-2020| NA | |Aesthetic Score | Aesthetic Score Partial | NA| |More robust Watermark score | Watermark Score | NA| |Hugging Face Hub | Hugging Face Hub | Not made public | | English | English | English? 
| ## ViT 和 ALIGN 是如何工作的 这些模型是干什么的?让我们简要讨论一下 ViT 和 ALIGN 模型的工作原理。 ViT——Vision Transformer 是 [谷歌于 2020 年提出的一种视觉模型](https://ai.googleblog.com/2020/12/transformers-for-image-recognition-at.html),类似于文本 Transformer 架构。这是一种与卷积神经网络不同的视觉方法 (AlexNet 自 2012 年以来一直主导视觉任务)。同样表现下,它的计算效率比 CNN 高达四倍,且具有域不可知性 (domain agnostic)。ViT 将输入的图像分解成一系列图像块 (patch),就像文本 Transformer 输入文本序列一样,然后为每个块提供位置嵌入以学习图像结构。ViT 的性能尤其在于具有出色的性能 - 计算权衡。谷歌的一些 ViT 模型是开源的,但其训练使用的 JFT-300 百万图像 - 标签对数据集尚未公开发布。Kakao Brain 的训练模型是基于公开发布的 [COYO-Labeled-300M](https://github.com/kakaobrain/coyo-dataset/tree/main/subset/COYO-Labeled-300M) 进行训练,对应的 ViT 模型在各种任务上具有相似表现,其代码、模型和训练数据 (COYO-Labeled-300M) 完全公开,以便能够进行复现和科学研究。 <p> <center> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/132_vit_align/vit-architecture.gif" alt="ViT architecture" width="700"/> </center> </p> <p> <center> <em>A Visualization of How ViT Works from <a href="https://ai.googleblog.com/2020/12/transformers-for-image-recognition-at.html">Google Blog</a></em> </center> </p> [谷歌在 2021 年推出了 ALIGN](https://ai.googleblog.com/2021/05/align-scaling-up-visual-and-vision.html),它是一种基于“嘈杂”文本–图像数据训练的视觉语言模型,可用于各种视觉和跨模态任务,如文本 - 图像检索。ALIGN 采用简单的双编码器架构,通过对比损失函数学习图像和文本对,ALIGN 的“嘈杂”训练语料特点包括用语料规模弥补其噪音以及强大的鲁棒性。之前的视觉语言表示学习都是在手动标注的大规模数据集上进行训练,这就需要大量的预先处理和成本。ALIGN 的语料库使用 HTML 文本 (alt-text) 数据作为图像的描述,导致数据集不可避免地嘈杂,但更大的数据量 (18 亿对) 使 ALIGN 能够在各种任务上表现出 SoTA 水平。Kakao Brain 的模型是第一个 ALIGN 开源版本,它在 `COYO` 数据集上训练,表现比谷歌的结果更好。 <p> <center> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/132_vit_align/align-architecture.png" width="700" /> </center> </p> <p> <center> <em>ALIGN Model from <a href="https://ai.googleblog.com/2021/05/align-scaling-up-visual-and-vision.html">Google Blog</a> </em> </center> <p> ## 如何使用 COYO 数据集 我们可以使用 Hugging Face 🤗 数据集库的一行代码方便地下载 COYO 数据集。要预览 COYO 数据集并了解数据处理过程和包含的元属性,请前往 [hub](https://huggingface.co/datasets/kakaobrain/coyo-700m) 数据集页面。 开始前,请安装 Hugging Face 🤗 数据集库: pip install datasets,然后下载数据集。 ```shell >>> from datasets import load_dataset >>> dataset = load_dataset('kakaobrain/coyo-700m') >>> dataset ``` 由于 `COYO` 数据集非常庞大,包含 747M 个图像 - 文本对,您可能无法在本地下载整个数据集。或者可能只需要下载和使用数据集的子集。为此,可以简单地将 `streaming=True` 参数传递给 `load_dataset()` 方法,以创建可迭代数据集,并在需要时下载数据实例。 ```shell >>> from datasets import load_dataset >>> dataset = load_dataset('kakaobrain/coyo-700m', streaming=True) >>> print(next(iter(dataset['train']))) {'id': 2680060225205, 'url': 'https://cdn.shopify.com/s/files/1/0286/3900/2698/products/TVN_Huile-olive-infuse-et-s-227x300_e9a90ffd-b6d2-4118-95a1-29a5c7a05a49_800x.jpg?v=1616684087', 'text': 'Olive oil infused with Tuscany herbs', 'width': 227, 'height': 300, 'image_phash': '9f91e133b1924e4e', 'text_length': 36, 'word_count': 6, 'num_tokens_bert': 6, 'num_tokens_gpt': 9, 'num_faces': 0, 'clip_similarity_vitb32': 0.19921875, 'clip_similarity_vitl14': 0.147216796875, 'nsfw_score_opennsfw2': 0.0058441162109375, 'nsfw_score_gantman': 0.018961310386657715, 'watermark_score': 0.11015450954437256, 'aesthetic_score_laion_v2': 4.871710777282715} ``` ## 如何使用 Hub 中的 ViT 和 ALIGN 让我们尝试一下新的 ViT 和 ALIGN 模型。由于 ALIGN 是新加入 Hugging Face 🤗 Transformers 的,我们先安装最新版本的库: `pip install -q git+https://github.com/huggingface/transformers.git` 然后导入我们将要使用的模块和库,开始使用 ViT 进行图像分类。请注意,新添加的 ALIGN 模型将会包含到下一版 PyPI 包。 ```py import requests from PIL import Image import torch from transformers import ViTImageProcessor, ViTForImageClassification ``` 接下来,我们将从 COCO 数据集中随机下载一张有沙发图像,上边有两只猫和一个遥控器,并对图像进行预处理为模型所期望的输入格式,我们可以方便地使用相应的预处理器类 
(`ViTProcessor`) 实现这一步。初始化模型和预处理器,可以使用 Hub 中 [Kakao Brain ViT repos](https://huggingface.co/models?search=kakaobrain/vit) 之一。请注意使用 Hub 中的库预处理器,确保预处理后的图像符合特定预训练模型所需的格式。 ```py url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = ViTImageProcessor.from_pretrained('kakaobrain/vit-large-patch16-384') model = ViTForImageClassification.from_pretrained('kakaobrain/vit-large-patch16-384') ``` 接下来将图像预处理并将其输入到模型,实现检索类别标签。Kakao Brain ViT 图像分类模型是在 ImageNet 标签上训练的,输出形状为 batch_size×1000 维度的类别 (logits)。 ```py # preprocess image or list of images inputs = processor(images=image, return_tensors="pt") # inference with torch.no_grad(): outputs = model(**inputs) # apply SoftMax to logits to compute the probability of each class preds = torch.nn.functional.softmax(outputs.logits, dim=-1) # print the top 5 class predictions and their probabilities top_class_preds = torch.argsort(preds, descending=True)[0, :5] for c in top_class_preds: print(f"{model.config.id2label[c.item()]} with probability {round(preds[0, c.item()].item(), 4)}") ``` 到这里就完成了!为了更加简单和简洁,还可以使用图像分类管道 ([pipeline](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.ImageClassificationPipeline)) 并将 Kakao Brain ViT 仓库名称作为目标模型传递给初始化管道。然后,我们可以传入图像的 URL 或本地路径,或 Pillow 图像,可选“top_k”参数表述返回前 k 个预测。让我们继续对猫和遥控器图片获取前 5 个预测结果。 ```shell >>> from transformers import pipeline >>> classifier = pipeline(task='image-classification', model='kakaobrain/vit-large-patch16-384') >>> classifier('http://images.cocodataset.org/val2017/000000039769.jpg', top_k=5) [{'score': 0.8223727941513062, 'label': 'remote control, remote'}, {'score': 0.06580372154712677, 'label': 'tabby, tabby cat'}, {'score': 0.0655883178114891, 'label': 'tiger cat'}, {'score': 0.0388941615819931, 'label': 'Egyptian cat'}, {'score': 0.0011215205304324627, 'label': 'lynx, catamount'}] ``` 如果您想更多地尝试 Kakao Brain ViT 模型,请前往 🤗 Hub 中心的项目 [空间](https://huggingface.co/spaces/adirik/kakao-brain-vit)。 <center> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/132_vit_align/vit_demo.png" alt="vit performance" width="900"/> </center> 我们开始实验 ALIGN,它可用于检索文本或图像的多模态嵌入或执行零样本图像分类。ALIGN 的 Transformer 实现和用法类似于 [CLIP](https://huggingface.co/docs/transformers/main/en/model_doc/clip)。首先,下载预训练模型和其处理器 (processor),处理器预处理图像和文本,使它们符合 ALIGN 的预期格式,以便将其输入到视觉和文本编码器中。这步导入了我们将要使用的模块并初始化预处理器和模型。 ```py import requests from PIL import Image import torch from transformers import AlignProcessor, AlignModel url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = AlignProcessor.from_pretrained('kakaobrain/align-base') model = AlignModel.from_pretrained('kakaobrain/align-base') ``` 先从零样本图像分类开始。为此,我们将提供候选标签 (自由格式文本),并使用 AlignModel 找出更好地描述图像的表述。我们将首先预处理图像和文本输入,并将预处理后的输入送到 AlignModel 中。 ```py candidate_labels = ['an image of a cat', 'an image of a dog'] inputs = processor(images=image, text=candidate_labels, return_tensors='pt') with torch.no_grad(): outputs = model(**inputs) # this is the image-text similarity score logits_per_image = outputs.logits_per_image # we can take the softmax to get the label probabilities probs = logits_per_image.softmax(dim=1) print(probs) ``` 完成了,就这么简单。要进一步尝试 Kakao Brain ALIGN 模型进行零样本图像分类,只需前往 Hugging Face 🤗 Hub 上的 [demo](https://huggingface.co/spaces/adirik/ALIGN-zero-shot-image-classification) 演示。请注意, `AlignModel` 的输出包括 `text_embeds` 和 `image_embeds` (参阅 ALIGN 的 
[文档](https://huggingface.co/docs/transformers/main/en/model_doc/align))。如果不需要计算用于零样本分类的每个图像和每个文本的逻辑 (logits),可以使用 `AlignModel` 类中的 `get_image_features()` 和 `get_text_features()` 方法便捷地检索视觉和文本嵌入。 ```py text_embeds = model.get_text_features( input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], ) image_embeds = model.get_image_features( pixel_values=inputs['pixel_values'], ) ``` 或者,我们可以使用 ALIGN 的独立视觉和文本编码器获取多模态嵌入。然后可以使用这些嵌入用于各种下游任务的模型训练,例如目标检测、图像分割和图像字幕生成。让我们看看如何使用 `AlignTextModel` 和 `AlignVisionModel` 获取这些嵌入。请注意,我们可以使用便捷的 AlignProcessor 类分别对文本和图像进行预处理。 ```py from transformers import AlignTextModel processor = AlignProcessor.from_pretrained('kakaobrain/align-base') model = AlignTextModel.from_pretrained('kakaobrain/align-base') # get embeddings of two text queries inputs = processor(['an image of a cat', 'an image of a dog'], return_tensors='pt') with torch.no_grad(): outputs = model(**inputs) # get the last hidden state and the final pooled output last_hidden_state = outputs.last_hidden_state pooled_output = outputs.pooler_output ``` 我们也可以在推理过程中设置 output_hidden_states 和 output_attentions 参数为 True,以返回所有隐藏状态和注意力值。 ```py with torch.no_grad(): outputs = model(**inputs, output_hidden_states=True, output_attentions=True) # print what information is returned for key, value in outputs.items(): print(key) ``` 在 `AlignVisionModel` 中执行相同的操作,获取图像的多模态嵌入。 ```py from transformers import AlignVisionModel processor = AlignProcessor.from_pretrained('kakaobrain/align-base') model = AlignVisionModel.from_pretrained('kakaobrain/align-base') url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) inputs = processor(images=image, return_tensors='pt') with torch.no_grad(): outputs = model(**inputs) # print the last hidden state and the final pooled output last_hidden_state = outputs.last_hidden_state pooled_output = outputs.pooler_output ``` 与 ViT 类似,使用零样本图像分类管道 ([pipeline](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.ZeroShotImageClassificationPipeline)) 可以让过程更加轻松。以下实现了如何使用此流程使用自由文本候选标签在野外执行图像分类。 ```shell >>> from transformers import pipeline >>> classifier = pipeline(task='zero-shot-image-classification', model='kakaobrain/align-base') >>> classifier( ... 'https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png', ... candidate_labels=['animals', 'humans', 'landscape'], ... ) [{'score': 0.9263709783554077, 'label': 'animals'}, {'score': 0.07163811475038528, 'label': 'humans'}, {'score': 0.0019908479880541563, 'label': 'landscape'}] >>> classifier( ... 'https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png', ... candidate_labels=['black and white', 'photorealist', 'painting'], ... ) [{'score': 0.9735308885574341, 'label': 'black and white'}, {'score': 0.025493400171399117, 'label': 'photorealist'}, {'score': 0.0009757201769389212, 'label': 'painting'}] ``` ## 结论 近年来,多模态取得了令人难以置信的进展,例如 CLIP 和 ALIGN 等模型赋能了各种下游任务,例如图像描述、零样本图像分类和开放世界目标检测。本博客,我们介绍了由 Kakao Brain 贡献的最新开源代码 ViT 和 ALIGN 模型,以及新的 COYO 文本 - 图像数据集。展示了如何使用这些模型执行各种任务,只需几行代码即可单独使用或作为 🤗 Transformers pipeline 的一部分使用。 我们正在继续整合最有影响力的计算机视觉和多模型模型,并乐于听取您的反馈。要了解计算机视觉和多模态研究的最新消息,作者及 Twitter:[@adirik](https://twitter.com/https://twitter.com/alaradirik), [@a_e_roberts](https://twitter.com/a_e_roberts), [@NielsRogge](https://twitter.com/NielsRogge), [@RisingSayak](https://twitter.com/RisingSayak), and [@huggingface](https://twitter.com/huggingface).
blog/zh/vit-align.md/0
{ "file_path": "blog/zh/vit-align.md", "repo_id": "blog", "token_count": 10135 }
58
# Hello world! We will now create the hello world of the ML world, building a model capable of solving MNIST dataset. Open `src/main.rs` and fill in this content: ```rust # extern crate candle_core; use candle_core::{Device, Result, Tensor}; struct Model { first: Tensor, second: Tensor, } impl Model { fn forward(&self, image: &Tensor) -> Result<Tensor> { let x = image.matmul(&self.first)?; let x = x.relu()?; x.matmul(&self.second) } } fn main() -> Result<()> { // Use Device::new_cuda(0)?; to use the GPU. let device = Device::Cpu; let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?; let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?; let model = Model { first, second }; let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?; let digit = model.forward(&dummy_image)?; println!("Digit {digit:?} digit"); Ok(()) } ``` Everything should now run with: ```bash cargo run --release ``` ## Using a `Linear` layer. Now that we have this, we might want to complexify things a bit, for instance by adding `bias` and creating the classical `Linear` layer. We can do as such ```rust # extern crate candle_core; # use candle_core::{Device, Result, Tensor}; struct Linear{ weight: Tensor, bias: Tensor, } impl Linear{ fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = x.matmul(&self.weight)?; x.broadcast_add(&self.bias) } } struct Model { first: Linear, second: Linear, } impl Model { fn forward(&self, image: &Tensor) -> Result<Tensor> { let x = self.first.forward(image)?; let x = x.relu()?; self.second.forward(&x) } } ``` This will change the model running code into a new function ```rust # extern crate candle_core; # use candle_core::{Device, Result, Tensor}; # struct Linear{ # weight: Tensor, # bias: Tensor, # } # impl Linear{ # fn forward(&self, x: &Tensor) -> Result<Tensor> { # let x = x.matmul(&self.weight)?; # x.broadcast_add(&self.bias) # } # } # # struct Model { # first: Linear, # second: Linear, # } # # impl Model { # fn forward(&self, image: &Tensor) -> Result<Tensor> { # let x = self.first.forward(image)?; # let x = x.relu()?; # self.second.forward(&x) # } # } fn main() -> Result<()> { // Use Device::new_cuda(0)?; to use the GPU. // Use Device::Cpu; to use the CPU. let device = Device::cuda_if_available(0)?; // Creating a dummy model let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?; let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?; let first = Linear{weight, bias}; let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?; let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?; let second = Linear{weight, bias}; let model = Model { first, second }; let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?; // Inference on the model let digit = model.forward(&dummy_image)?; println!("Digit {digit:?} digit"); Ok(()) } ``` Now it works, it is a great way to create your own layers. But most of the classical layers are already implemented in [candle-nn](https://github.com/huggingface/candle/tree/main/candle-nn). ## Using `candle_nn`. For instance [Linear](https://github.com/huggingface/candle/blob/main/candle-nn/src/linear.rs) is already there. This Linear is coded with PyTorch layout in mind, to reuse better existing models out there, so it uses the transpose of the weights and not the weights directly. 
So instead we can simplify our example:

```bash
cargo add --git https://github.com/huggingface/candle.git candle-nn
```

And rewrite our examples using it

```rust
# extern crate candle_core;
# extern crate candle_nn;
use candle_core::{Device, Result, Tensor};
use candle_nn::{Linear, Module};

struct Model {
    first: Linear,
    second: Linear,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = self.first.forward(image)?;
        let x = x.relu()?;
        self.second.forward(&x)
    }
}

fn main() -> Result<()> {
    // Use Device::new_cuda(0)?; to use the GPU.
    let device = Device::Cpu;

    // This has changed (784, 100) -> (100, 784) !
    let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
    let first = Linear::new(weight, Some(bias));
    let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
    let second = Linear::new(weight, Some(bias));
    let model = Model { first, second };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;

    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```

Feel free to modify this example to use `Conv2d` to create a classical convnet instead.

Now that we have the running dummy code we can get to more advanced topics:

- [For PyTorch users](../guide/cheatsheet.md)
- [Running existing models](../inference/inference.md)
- [Training models](../training/training.md)
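Before heading to those chapters, here is a quick, illustrative sketch of the `Conv2d` suggestion above. It is not from the original book chapter: the channel counts, the 3x3 kernel, the 28x28 input and the use of the low-level `Tensor::conv2d` call together with `flatten_from` are assumptions made for the example (candle-nn also ships a higher-level `Conv2d` layer you may prefer in practice).

```rust
# extern crate candle_core;
# extern crate candle_nn;
use candle_core::{Device, Result, Tensor};
use candle_nn::{Linear, Module};

struct TinyConvNet {
    // A single 3x3 convolution followed by a fully connected classifier.
    conv_weight: Tensor, // (out_channels, in_channels, 3, 3)
    classifier: Linear,
}

impl TinyConvNet {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        // conv2d(kernel, padding, stride, dilation, groups)
        let x = image.conv2d(&self.conv_weight, 1, 1, 1, 1)?;
        let x = x.relu()?;
        // Flatten everything but the batch dimension before the linear layer.
        let x = x.flatten_from(1)?;
        self.classifier.forward(&x)
    }
}

fn main() -> Result<()> {
    let device = Device::Cpu;
    // 8 output channels, 1 input channel (MNIST is grayscale), 3x3 kernels.
    let conv_weight = Tensor::randn(0f32, 1.0, (8, 1, 3, 3), &device)?;
    // With padding 1 and stride 1 the spatial size stays 28x28, so 8 * 28 * 28 inputs.
    let weight = Tensor::randn(0f32, 1.0, (10, 8 * 28 * 28), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
    let model = TinyConvNet {
        conv_weight,
        classifier: Linear::new(weight, Some(bias)),
    };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 1, 28, 28), &device)?;
    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```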
candle/candle-book/src/guide/hello_world.md/0
{ "file_path": "candle/candle-book/src/guide/hello_world.md", "repo_id": "candle", "token_count": 2069 }
59
# candle

Minimalist ML framework for Rust
candle/candle-core/README.md/0
{ "file_path": "candle/candle-core/README.md", "repo_id": "candle", "token_count": 11 }
60
use crate::{op::BackpropOp, op::Op, Error, Result, Tensor}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv1D { pub(crate) b_size: usize, // Maybe we should have a version without l_in as this bit depends on the input and not only on // the weights. pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConv1D { pub(crate) fn l_out(&self) -> usize { (self.l_in + 2 * self.padding - self.dilation * (self.k_size - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose1D { pub(crate) b_size: usize, pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose1D { pub(crate) fn l_out(&self) -> usize { (self.l_in - 1) * self.stride - 2 * self.padding + self.dilation * (self.k_size - 1) + self.output_padding + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum CudnnFwdAlgo { ImplicitGemm, ImplicitPrecompGemm, Gemm, Direct, Fft, FftTiling, Winograd, WinogradNonFused, Count, } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, pub cudnn_fwd_algo: Option<CudnnFwdAlgo>, } impl ParamsConv2D { pub(crate) fn out_h(&self) -> usize { (self.i_h + 2 * self.padding - self.dilation * (self.k_h - 1) - 1) / self.stride + 1 } pub(crate) fn out_w(&self) -> usize { (self.i_w + 2 * self.padding - self.dilation * (self.k_w - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose2D { pub(crate) fn out_h(&self) -> usize { (self.i_h - 1) * self.stride + self.dilation * (self.k_h - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_w(&self) -> usize { (self.i_w - 1) * self.stride + self.dilation * (self.k_w - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } impl Tensor { fn conv1d_single_group(&self, kernel: &Self, params: &ParamsConv1D) -> Result<Self> { let storage = self.storage() .conv1d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv1D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 1D convolution over the input tensor. 
pub fn conv1d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (c_out, c_in_k, k_size) = kernel.dims3()?; let (b_size, c_in, l_in) = self.dims3()?; if c_in != c_in_k * groups { Err(Error::Conv1dInvalidArgs { inp_shape: self.shape().clone(), k_shape: kernel.shape().clone(), padding, stride, msg: "the number of in-channels on the input doesn't match the kernel size", } .bt())? } let params = ParamsConv1D { b_size, l_in, c_out: c_out / groups, c_in: c_in / groups, k_size, padding, stride, dilation, }; if groups == 1 { self.conv1d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv1d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } fn conv_transpose1d_single_group( &self, kernel: &Self, params: &ParamsConvTranspose1D, ) -> Result<Self> { let storage = self.storage().conv_transpose1d( self.layout(), &kernel.storage(), kernel.layout(), params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose1D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 1D transposed convolution over the input tensor. pub fn conv_transpose1d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (c_in_k, c_out, k_size) = kernel.dims3()?; let (b_size, c_in, l_in) = self.dims3()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } if c_in % groups != 0 { crate::bail!("in_channel {c_in} is not divisible by the number of groups") } let params = ParamsConvTranspose1D { b_size, l_in, k_size, c_out, c_in: c_in / groups, padding, output_padding, stride, dilation, }; if groups == 1 { self.conv_transpose1d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv_transpose1d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } fn conv2d_single_group(&self, kernel: &Self, params: &ParamsConv2D) -> Result<Self> { let storage = self.storage() .conv2d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv2D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 2D convolution over the input tensor. 
pub fn conv2d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_out, c_in_k, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k * groups { crate::bail!( "in_channel mismatch between input ({c_in}, groups {groups}) and kernel ({c_in_k})" ) } let params = ParamsConv2D { b_size, i_h, i_w, k_h, k_w, c_out: c_out / groups, c_in: c_in / groups, padding, stride, dilation, cudnn_fwd_algo: None, }; if groups == 1 { self.conv2d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv2d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } /// Applies a 2D transposed convolution over the input tensor. pub fn conv_transpose2d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_in_k, c_out, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } let params = ParamsConvTranspose2D { b_size, i_h, i_w, k_h, k_w, c_out, c_in, padding, output_padding, stride, dilation, }; let storage = self.storage().conv_transpose2d( self.layout(), &kernel.storage(), kernel.layout(), &params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose2D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } }
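// The module below is not part of the original file: it is a minimal, illustrative
// sketch of how `Tensor::conv2d` can be called. The tensor shapes and the
// padding/stride/dilation/groups values are arbitrary assumptions used only to
// exercise the API and check the resulting output shape.
#[cfg(test)]
mod conv2d_example {
    use crate::{Device, Result, Tensor};

    #[test]
    fn conv2d_shape_smoke_test() -> Result<()> {
        let dev = Device::Cpu;
        // A batch of one RGB image of size 8x8: (b_size, c_in, h, w).
        let xs = Tensor::randn(0f32, 1.0, (1, 3, 8, 8), &dev)?;
        // Four 3x3 filters over three input channels: (c_out, c_in, k_h, k_w).
        let ws = Tensor::randn(0f32, 1.0, (4, 3, 3, 3), &dev)?;
        // padding = 1, stride = 1, dilation = 1, groups = 1 keeps the spatial size.
        let ys = xs.conv2d(&ws, 1, 1, 1, 1)?;
        assert_eq!(ys.dims(), &[1, 4, 8, 8]);
        Ok(())
    }
}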
candle/candle-core/src/conv.rs/0
{ "file_path": "candle/candle-core/src/conv.rs", "repo_id": "candle", "token_count": 5807 }
61
use crate::backend::BackendDevice; use crate::cpu_backend::CpuDevice; use crate::{CpuStorage, DType, Result, Shape, Storage, WithDType}; /// A `DeviceLocation` represents a physical device whereas multiple `Device` /// can live on the same location (typically for cuda devices). #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum DeviceLocation { Cpu, Cuda { gpu_id: usize }, Metal { gpu_id: usize }, } #[derive(Debug, Clone)] pub enum Device { Cpu, Cuda(crate::CudaDevice), Metal(crate::MetalDevice), } pub trait NdArray { fn shape(&self) -> Result<Shape>; fn to_cpu_storage(&self) -> CpuStorage; } impl<S: WithDType> NdArray for S { fn shape(&self) -> Result<Shape> { Ok(Shape::from(())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(&[*self]) } } impl<S: WithDType, const N: usize> NdArray for &[S; N] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self.as_slice()) } } impl<S: WithDType> NdArray for &[S] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self) } } impl<S: WithDType, const N: usize, const M: usize> NdArray for &[[S; N]; M] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((M, N))) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage_owned(self.concat()) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize> NdArray for &[[[S; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3); for i1 in 0..N1 { for i2 in 0..N2 { vec.extend(self[i1][i2]) } } S::to_cpu_storage_owned(vec) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize, const N4: usize> NdArray for &[[[[S; N4]; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3, N4))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3 * N4); for i1 in 0..N1 { for i2 in 0..N2 { for i3 in 0..N3 { vec.extend(self[i1][i2][i3]) } } } S::to_cpu_storage_owned(vec) } } impl<S: NdArray> NdArray for Vec<S> { fn shape(&self) -> Result<Shape> { if self.is_empty() { crate::bail!("empty array") } let shape0 = self[0].shape()?; let n = self.len(); for v in self.iter() { let shape = v.shape()?; if shape != shape0 { crate::bail!("two elements have different shapes {shape:?} {shape0:?}") } } Ok(Shape::from([[n].as_slice(), shape0.dims()].concat())) } fn to_cpu_storage(&self) -> CpuStorage { // This allocates intermediary memory and shouldn't be necessary. 
let storages = self.iter().map(|v| v.to_cpu_storage()).collect::<Vec<_>>(); CpuStorage::concat(storages.as_slice()).unwrap() } } impl Device { pub fn new_cuda(ordinal: usize) -> Result<Self> { Ok(Self::Cuda(crate::CudaDevice::new(ordinal)?)) } pub fn new_metal(ordinal: usize) -> Result<Self> { Ok(Self::Metal(crate::MetalDevice::new(ordinal)?)) } pub fn set_seed(&self, seed: u64) -> Result<()> { match self { Self::Cpu => CpuDevice.set_seed(seed), Self::Cuda(c) => c.set_seed(seed), Self::Metal(m) => m.set_seed(seed), } } pub fn same_device(&self, rhs: &Self) -> bool { match (self, rhs) { (Self::Cpu, Self::Cpu) => true, (Self::Cuda(lhs), Self::Cuda(rhs)) => lhs.same_device(rhs), (Self::Metal(lhs), Self::Metal(rhs)) => lhs.same_device(rhs), _ => false, } } pub fn location(&self) -> DeviceLocation { match self { Self::Cpu => DeviceLocation::Cpu, Self::Cuda(device) => device.location(), Device::Metal(device) => device.location(), } } pub fn is_cpu(&self) -> bool { matches!(self, Self::Cpu) } pub fn is_cuda(&self) -> bool { matches!(self, Self::Cuda(_)) } pub fn is_metal(&self) -> bool { matches!(self, Self::Metal(_)) } pub fn cuda_if_available(ordinal: usize) -> Result<Self> { if crate::utils::cuda_is_available() { Self::new_cuda(ordinal) } else { Ok(Self::Cpu) } } pub(crate) fn rand_uniform_f64( &self, lo: f64, up: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_uniform(shape, DType::F32, lo, up)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cuda(storage)) } } Device::Metal(device) => { let storage = device.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn rand_uniform<T: crate::FloatDType>( &self, lo: T, up: T, shape: &Shape, ) -> Result<Storage> { self.rand_uniform_f64(lo.to_f64(), up.to_f64(), shape, T::DTYPE) } pub(crate) fn rand_normal_f64( &self, mean: f64, std: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. 
if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_normal(shape, DType::F32, mean, std)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cuda(storage)) } } Device::Metal(device) => { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn rand_normal<T: crate::FloatDType>( &self, mean: T, std: T, shape: &Shape, ) -> Result<Storage> { self.rand_normal_f64(mean.to_f64(), std.to_f64(), shape, T::DTYPE) } pub(crate) fn ones(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.ones_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn zeros(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.zeros_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.alloc_uninit(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.alloc_uninit(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.alloc_uninit(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage_from_slice<D: WithDType>(&self, data: &[D]) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(data.to_cpu_storage())), Device::Cuda(device) => { let storage = device.storage_from_slice(data)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.storage_from_slice(data)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage<A: NdArray>(&self, array: A) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(array.to_cpu_storage())), Device::Cuda(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage_owned<S: WithDType>(&self, data: Vec<S>) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(S::to_cpu_storage_owned(data))), Device::Cuda(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Metal(storage)) } } } pub fn synchronize(&self) -> Result<()> { match self { Self::Cpu => Ok(()), Self::Cuda(d) => d.synchronize(), Self::Metal(d) => d.synchronize(), } } }
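// The module below is not part of the original file: it is a small, illustrative
// sketch of the public `Device` helpers defined above. It only exercises the CPU
// fallback path, so it runs regardless of which backends are compiled in.
#[cfg(test)]
mod device_example {
    use super::{Device, DeviceLocation};

    #[test]
    fn cpu_fallback_and_location() -> crate::Result<()> {
        // Falls back to the CPU when no CUDA device is available.
        let dev = Device::cuda_if_available(0)?;
        if dev.is_cpu() {
            assert_eq!(dev.location(), DeviceLocation::Cpu);
        }
        // A device always compares equal to itself.
        assert!(dev.same_device(&dev));
        Ok(())
    }
}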
candle/candle-core/src/device.rs/0
{ "file_path": "candle/candle-core/src/device.rs", "repo_id": "candle", "token_count": 5872 }
62
use super::{GgmlDType, QStorage}; use crate::quantized::k_quants::GgmlType; use crate::{backend::BackendDevice, cuda_backend::WrapErr}; use crate::{CudaDevice, CudaStorage, Result}; use half::f16; use cudarc::driver::{CudaSlice, CudaView, DeviceSlice}; #[derive(Clone, Debug)] pub struct QCudaStorage { data: CudaSlice<u8>, dtype: GgmlDType, device: CudaDevice, } static FORCE_DMMV: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false); pub fn set_force_dmmv(f: bool) { FORCE_DMMV.store(f, std::sync::atomic::Ordering::Relaxed) } pub const WARP_SIZE: usize = 32; pub const MMQ_X_Q4_0_AMPERE: usize = 4; pub const MMQ_Y_Q4_0_AMPERE: usize = 32; pub const NWARPS_Q4_0_AMPERE: usize = 4; pub const GGML_CUDA_MMV_X: usize = 32; pub const GGML_CUDA_MMV_Y: usize = 1; pub const CUDA_QUANTIZE_BLOCK_SIZE: usize = 256; pub const CUDA_DEQUANTIZE_BLOCK_SIZE: usize = 256; pub const MATRIX_ROW_PADDING: usize = 512; fn ceil_div(p: usize, q: usize) -> usize { (p + q - 1) / q } fn pad(p: usize, q: usize) -> usize { ceil_div(p, q) * q } fn quantize_q8_1( src: &CudaView<f32>, dst: &mut CudaSlice<u8>, elem_count: usize, ky: usize, dev: &CudaDevice, ) -> Result<()> { use cudarc::driver::LaunchAsync; let kx = elem_count; let kx_padded = pad(kx, MATRIX_ROW_PADDING); let num_blocks = ceil_div(kx_padded, CUDA_QUANTIZE_BLOCK_SIZE); let func = dev.get_or_load_func("quantize_q8_1", candle_kernels::QUANTIZED)?; let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, ky as u32, 1), block_dim: (CUDA_QUANTIZE_BLOCK_SIZE as u32, 1, 1), shared_mem_bytes: 0, }; let params = (src, dst, kx as i32, kx_padded as i32); unsafe { func.launch(cfg, params) }.w()?; Ok(()) } fn dequantize_f32( data: &CudaSlice<u8>, dtype: GgmlDType, elem_count: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let nb = (elem_count + 255) / 256; let (kernel_name, is_k, block_dim, num_blocks) = match dtype { GgmlDType::Q4_0 => ("dequantize_block_q4_0_f32", false, 32, nb), GgmlDType::Q4_1 => ("dequantize_block_q4_1_f32", false, 32, nb), GgmlDType::Q5_0 => ( "dequantize_block_q5_0_f32", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q5_1 => ( "dequantize_block_q5_1_f32", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q8_0 => ("dequantize_block_q8_0_f32", false, 32, nb), GgmlDType::Q2K => ("dequantize_block_q2_K_f32", true, 64, nb), GgmlDType::Q3K => ("dequantize_block_q3_K_f32", true, 64, nb), GgmlDType::Q4K => ("dequantize_block_q4_K_f32", true, 32, nb), GgmlDType::Q5K => ("dequantize_block_q5_K_f32", true, 64, nb), GgmlDType::Q6K => ("dequantize_block_q6_K_f32", true, 64, nb), GgmlDType::Q8K => ("dequantize_block_q8_K_f32", true, 32, nb), _ => crate::bail!("unsupported dtype for dequantize {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(elem_count).w()? }; // See e.g. 
// https://github.com/ggerganov/llama.cpp/blob/cbbd1efa06f8c09f9dff58ff9d9af509cc4c152b/ggml-cuda.cu#L7270 let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; if is_k { let params = (data, &dst); unsafe { func.launch(cfg, params) }.w()?; } else { let nb32 = match dtype { GgmlDType::Q5_0 | GgmlDType::Q5_1 => elem_count, _ => elem_count / 32, }; let params = (data, &dst, nb32 as i32); unsafe { func.launch(cfg, params) }.w()?; } Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn dequantize_f16( data: &CudaSlice<u8>, dtype: GgmlDType, elem_count: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let nb = (elem_count + 255) / 256; let (kernel_name, is_k, block_dim, num_blocks) = match dtype { GgmlDType::Q4_0 => ("dequantize_block_q4_0_f16", false, 32, nb), GgmlDType::Q4_1 => ("dequantize_block_q4_1_f16", false, 32, nb), GgmlDType::Q5_0 => ( "dequantize_block_q5_0_f16", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q5_1 => ( "dequantize_block_q5_1_f16", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q8_0 => ("dequantize_block_q8_0_f16", false, 32, nb), GgmlDType::Q2K => ("dequantize_block_q2_K_f16", true, 64, nb), GgmlDType::Q3K => ("dequantize_block_q3_K_f16", true, 64, nb), GgmlDType::Q4K => ("dequantize_block_q4_K_f16", true, 32, nb), GgmlDType::Q5K => ("dequantize_block_q5_K_f16", true, 64, nb), GgmlDType::Q6K => ("dequantize_block_q6_K_f16", true, 64, nb), GgmlDType::Q8K => ("dequantize_block_q8_K_f16", true, 32, nb), _ => crate::bail!("unsupported dtype for dequantize {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f16>(elem_count).w()? }; // See e.g. 
// https://github.com/ggerganov/llama.cpp/blob/cbbd1efa06f8c09f9dff58ff9d9af509cc4c152b/ggml-cuda.cu#L7270 let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; if is_k { let params = (data, &dst); unsafe { func.launch(cfg, params) }.w()?; } else { let nb32 = match dtype { GgmlDType::Q5_0 | GgmlDType::Q5_1 => elem_count, _ => elem_count / 32, }; let params = (data, &dst, nb32 as i32); unsafe { func.launch(cfg, params) }.w()?; } Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn dequantize_mul_mat_vec( data: &CudaSlice<u8>, y: &CudaView<f32>, dtype: GgmlDType, ncols: usize, nrows: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len() / dtype.type_size() * dtype.block_size(); if data_elems < ncols * nrows { crate::bail!("unexpected data size {}, ncols {ncols} {nrows}", data_elems) } if y.len() != ncols { crate::bail!("unexpected y size {}, ncols {ncols} {nrows}", y.len()) } let kernel_name = match dtype { GgmlDType::Q4_0 => "dequantize_mul_mat_vec_q4_0_cuda", GgmlDType::Q4_1 => "dequantize_mul_mat_vec_q4_1_cuda", GgmlDType::Q5_0 => "dequantize_mul_mat_vec_q5_0_cuda", GgmlDType::Q5_1 => "dequantize_mul_mat_vec_q5_1_cuda", GgmlDType::Q8_0 => "dequantize_mul_mat_vec_q8_0_cuda", GgmlDType::Q2K => "dequantize_mul_mat_vec_q2_k", GgmlDType::Q3K => "dequantize_mul_mat_vec_q3_k", GgmlDType::Q4K => "dequantize_mul_mat_vec_q4_k", GgmlDType::Q5K => "dequantize_mul_mat_vec_q5_k", GgmlDType::Q6K => "dequantize_mul_mat_vec_q6_k", _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(nrows).w()? }; let block_num_y = ceil_div(nrows, GGML_CUDA_MMV_Y); let cfg = cudarc::driver::LaunchConfig { grid_dim: (block_num_y as u32, 1, 1), block_dim: (WARP_SIZE as u32, GGML_CUDA_MMV_Y as u32, 1), shared_mem_bytes: 0, }; let params = (data, y, &dst, ncols as i32, nrows as i32); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn mul_mat_vec_via_q8_1( data: &CudaSlice<u8>, y: &CudaView<f32>, dtype: GgmlDType, ncols: usize, nrows: usize, b_size: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len() / dtype.type_size() * dtype.block_size(); if data_elems < ncols * nrows { crate::bail!("unexpected data size {}, ncols {ncols} {nrows}", data_elems) } if y.len() != ncols * b_size { crate::bail!("unexpected y size {}, ncols {ncols} {nrows}", y.len()) } if b_size == 0 || b_size > 8 { crate::bail!("only bsize between 1 and 8 are supported, got {b_size}") } // Start by quantizing y let ncols_padded = pad(ncols, MATRIX_ROW_PADDING); let y_size_in_bytes = b_size * ncols_padded * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? 
}; quantize_q8_1(y, &mut y_q8_1, ncols, b_size, dev)?; let kernel_name = match dtype { GgmlDType::Q4_0 => "mul_mat_vec_q4_0_q8_1_cuda", GgmlDType::Q4_1 => "mul_mat_vec_q4_1_q8_1_cuda", GgmlDType::Q5_0 => "mul_mat_vec_q5_0_q8_1_cuda", GgmlDType::Q5_1 => "mul_mat_vec_q5_1_q8_1_cuda", GgmlDType::Q8_0 => "mul_mat_vec_q8_0_q8_1_cuda", GgmlDType::Q2K => "mul_mat_vec_q2_K_q8_1_cuda", GgmlDType::Q3K => "mul_mat_vec_q3_K_q8_1_cuda", GgmlDType::Q4K => "mul_mat_vec_q4_K_q8_1_cuda", GgmlDType::Q5K => "mul_mat_vec_q5_K_q8_1_cuda", GgmlDType::Q6K => "mul_mat_vec_q6_K_q8_1_cuda", _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let kernel_name = format!("{kernel_name}{b_size}"); let func = dev.get_or_load_func(&kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(nrows * b_size).w()? }; // https://github.com/ggerganov/llama.cpp/blob/facb8b56f8fd3bb10a693bf0943ae9d69d0828ef/ggml-cuda/mmvq.cu#L98 let (nblocks, nwarps) = match b_size { 1 => (nrows as u32, 4), 2..=4 => ((nrows as u32 + 1) / 2, 4), 5..=8 => ((nrows as u32 + 1) / 2, 2), _ => crate::bail!("unexpected bsize {b_size}"), }; let cfg = cudarc::driver::LaunchConfig { grid_dim: (nblocks, 1, 1), block_dim: (WARP_SIZE as u32, nwarps, 1), shared_mem_bytes: 0, }; let params = ( data, &y_q8_1, &dst, /* ncols_x */ ncols as i32, /* nrows_x */ nrows as i32, /* nrows_y */ ncols_padded as i32, /* nrows_dst */ nrows as i32, ); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } #[allow(clippy::too_many_arguments)] fn mul_mat_via_q8_1( data: &CudaSlice<u8>, y: &CudaView<f32>, dtype: GgmlDType, x_rows: usize, x_cols: usize, y_rows: usize, y_cols: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len() / dtype.type_size() * dtype.block_size(); if data_elems < x_rows * x_cols { crate::bail!("unexpected lhs size {}, {x_rows} {x_cols}", data_elems) } if y.len() != y_rows * y_cols { crate::bail!("unexpected y size {}, {y_rows} {y_cols}", y.len()) } if x_cols != y_rows { crate::bail!("unexpected x/y size {x_rows} {x_cols} {y_rows} {y_cols}") } let k = x_cols; // Start by quantizing y let k_padded = pad(k, MATRIX_ROW_PADDING); let y_size_in_bytes = k_padded * y_rows * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? }; quantize_q8_1(y, &mut y_q8_1, k, y_cols, dev)?; let (kernel_name, mmq_x, mmq_y) = match dtype { GgmlDType::Q4_0 => ("mul_mat_q4_0", 64, 128), GgmlDType::Q4_1 => ("mul_mat_q4_1", 64, 128), GgmlDType::Q5_0 => ("mul_mat_q5_0", 128, 64), GgmlDType::Q5_1 => ("mul_mat_q5_1", 128, 64), GgmlDType::Q8_0 => ("mul_mat_q8_0", 128, 64), GgmlDType::Q2K => ("mul_mat_q2_K", 64, 128), GgmlDType::Q3K => ("mul_mat_q3_K", 128, 128), GgmlDType::Q4K => ("mul_mat_q4_K", 64, 128), GgmlDType::Q5K => ("mul_mat_q5_K", 64, 128), GgmlDType::Q6K => ("mul_mat_q6_K", 64, 64), _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(x_rows * y_cols).w()? 
}; let cfg = cudarc::driver::LaunchConfig { grid_dim: ( ceil_div(x_rows, mmq_y) as u32, ceil_div(y_cols, mmq_x) as u32, 1, ), block_dim: (WARP_SIZE as u32, 4, 1), shared_mem_bytes: 0, }; let params = ( /* vx */ data, /* vy */ &y_q8_1, /* dst */ &dst, /* ncols_x */ x_cols as i32, /* nrows_x */ x_rows as i32, /* ncols_y */ y_cols as i32, /* nrows_y */ k_padded as i32, /* nrows_dst */ x_rows as i32, ); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } impl QCudaStorage { pub fn zeros(device: &CudaDevice, el_count: usize, dtype: GgmlDType) -> Result<Self> { let size_in_bytes = ceil_div(el_count, dtype.block_size()) * dtype.type_size(); let data = device.alloc_zeros::<u8>(size_in_bytes).w()?; Ok(QCudaStorage { data, device: device.clone(), dtype, }) } pub fn dtype(&self) -> GgmlDType { self.dtype } pub fn device(&self) -> &CudaDevice { &self.device } pub fn dequantize(&self, elem_count: usize) -> Result<CudaStorage> { fn deq<T: GgmlType>(buffer: &[u8], n: usize, dst: &mut [f32]) -> Result<()> { let slice = unsafe { std::slice::from_raw_parts(buffer.as_ptr() as *const T, n) }; let vec = slice.to_vec(); T::to_float(&vec, dst) } let fast_kernel = matches!( self.dtype, GgmlDType::Q4_0 | GgmlDType::Q4_1 | GgmlDType::Q5_0 | GgmlDType::Q5_1 | GgmlDType::Q8_0 | GgmlDType::Q2K | GgmlDType::Q3K | GgmlDType::Q4K | GgmlDType::Q5K | GgmlDType::Q6K | GgmlDType::Q8K ); if fast_kernel { return dequantize_f32(&self.data, self.dtype, elem_count, self.device()); } // Run the dequantization on cpu. let buffer = self.device.dtoh_sync_copy(&self.data).w()?; let mut out = vec![0.0; elem_count]; let block_len = elem_count / self.dtype.block_size(); match self.dtype { GgmlDType::F32 => deq::<f32>(&buffer, block_len, &mut out)?, GgmlDType::F16 => deq::<half::f16>(&buffer, block_len, &mut out)?, GgmlDType::Q4_0 => deq::<crate::quantized::BlockQ4_0>(&buffer, block_len, &mut out)?, GgmlDType::Q4_1 => deq::<crate::quantized::BlockQ4_1>(&buffer, block_len, &mut out)?, GgmlDType::Q5_0 => deq::<crate::quantized::BlockQ5_0>(&buffer, block_len, &mut out)?, GgmlDType::Q5_1 => deq::<crate::quantized::BlockQ5_1>(&buffer, block_len, &mut out)?, GgmlDType::Q8_0 => deq::<crate::quantized::BlockQ8_0>(&buffer, block_len, &mut out)?, GgmlDType::Q8_1 => deq::<crate::quantized::BlockQ8_1>(&buffer, block_len, &mut out)?, GgmlDType::Q2K => deq::<crate::quantized::BlockQ2K>(&buffer, block_len, &mut out)?, GgmlDType::Q3K => deq::<crate::quantized::BlockQ3K>(&buffer, block_len, &mut out)?, GgmlDType::Q4K => deq::<crate::quantized::BlockQ4K>(&buffer, block_len, &mut out)?, GgmlDType::Q5K => deq::<crate::quantized::BlockQ5K>(&buffer, block_len, &mut out)?, GgmlDType::Q6K => deq::<crate::quantized::BlockQ6K>(&buffer, block_len, &mut out)?, GgmlDType::Q8K => deq::<crate::quantized::BlockQ8K>(&buffer, block_len, &mut out)?, } self.device .storage_from_cpu_storage(&crate::CpuStorage::F32(out)) } pub fn dequantize_f16(&self, elem_count: usize) -> Result<CudaStorage> { dequantize_f16(&self.data, self.dtype, elem_count, self.device()) } pub fn quantize(&mut self, src: &CudaStorage) -> Result<()> { // Run the quantization on cpu. let src = match &src.slice { crate::cuda_backend::CudaStorageSlice::F32(data) => { self.device.dtoh_sync_copy(data).w()? 
} _ => crate::bail!("only f32 can be quantized"), }; let src_len = src.len(); let src = crate::Storage::Cpu(crate::CpuStorage::F32(src)); let mut qcpu_storage = crate::Device::Cpu.qzeros(src_len, self.dtype)?; qcpu_storage.quantize(&src)?; let data = qcpu_storage.data()?; let data = self.device.htod_sync_copy(data.as_ref()).w()?; self.data = data; Ok(()) } pub fn storage_size_in_bytes(&self) -> usize { self.data.len() } pub fn fwd( &self, self_shape: &crate::Shape, storage: &CudaStorage, layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { let max_bm = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { 1 } else { 8 }; let use_vec_kernel = match layout.shape().dims() { [b, m, _k] => b * m <= max_bm, [b, _k] => *b <= max_bm, _ => false, }; if use_vec_kernel { self.dequantize_matmul_vec(self_shape, storage, layout) } else { self.dequantize_matmul(self_shape, storage, layout) } } } impl QCudaStorage { fn dequantize_matmul_vec( &self, self_shape: &crate::Shape, rhs: &CudaStorage, rhs_l: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { let (nrows, ncols) = self_shape.dims2()?; let rhs = rhs.as_cuda_slice::<f32>()?; let rhs = match rhs_l.contiguous_offsets() { Some((o1, o2)) => rhs.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "dmmv" }.bt())?, }; let (b_size, k) = match rhs_l.shape().dims() { [b, m, k] => (b * m, *k), [b, k] => (*b, *k), _ => crate::bail!("unexpected rhs shape in dmmv {:?}", rhs_l.shape()), }; if ncols != k { crate::bail!("mismatch on matmul dim {self_shape:?} {:?}", rhs_l.shape()) } let out = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { dequantize_mul_mat_vec(&self.data, &rhs, self.dtype, ncols, nrows, self.device())? } else { mul_mat_vec_via_q8_1( &self.data, &rhs, self.dtype, ncols, nrows, b_size, self.device(), )? }; let mut out_shape = rhs_l.shape().dims().to_vec(); out_shape.pop(); out_shape.push(nrows); Ok((out, out_shape.into())) } fn dequantize_matmul( &self, self_shape: &crate::Shape, storage: &CudaStorage, layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { use crate::backend::BackendStorage; let (n, k) = self_shape.dims2()?; let (b, m, k2) = match layout.shape().dims() { &[b, m, k2] => (b, m, k2), &[m, k2] => (1, m, k2), s => crate::bail!("unexpected shape for input {s:?}"), }; if k2 != k { crate::bail!("mismatch on matmul dim {self_shape:?} {:?}", layout.shape()) } let out = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { let data_f32 = self.dequantize(n * k)?; let rhs_l = crate::Layout::new((k, n).into(), vec![1, k], 0).broadcast_as((b, k, n))?; storage.matmul(&data_f32, (b, m, n, k), layout, &rhs_l)? } else { let storage = storage.as_cuda_slice::<f32>()?; let storage = match layout.contiguous_offsets() { Some((o1, o2)) => storage.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "quantized-matmul", } .bt())?, }; mul_mat_via_q8_1( &self.data, &storage, self.dtype, /* x_rows */ n, /* x_cols */ k, /* y_rows */ k, /* y_cols */ b * m, self.device(), )? 
}; let mut out_shape = layout.shape().dims().to_vec(); out_shape.pop(); out_shape.push(n); Ok((out, out_shape.into())) } } pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>( device: &CudaDevice, data: &[T], ) -> Result<super::QStorage> { let data = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, core::mem::size_of_val(data)) }; let data = device.htod_sync_copy(data).w()?; Ok(QStorage::Cuda(QCudaStorage { data, device: device.clone(), dtype: T::DTYPE, })) } #[cfg(test)] mod test { use super::*; #[test] fn cuda_quantize_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let el = 256; let el_padded = pad(el, MATRIX_ROW_PADDING); let y_size_in_bytes = el_padded * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? }; let vs: Vec<f32> = (0..el).map(|v| v as f32).collect(); let y = dev.htod_sync_copy(&vs).w()?; quantize_q8_1(&y.slice(..), &mut y_q8_1, el, 1, &dev)?; Ok(()) } #[test] fn cuda_mmv_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let ncols = 256; let vs: Vec<f32> = (0..ncols).map(|v| v as f32).collect(); let y = dev.htod_sync_copy(&vs).w()?; let mut xs = QCudaStorage::zeros(&dev, ncols, GgmlDType::Q4_0)?; xs.quantize(&CudaStorage::wrap_cuda_slice(y.clone(), dev.clone()))?; let cuda_storage = mul_mat_vec_via_q8_1( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* ncols */ ncols, /* nrows */ 1, /* b_size */ 1, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); assert_eq!(vs.len(), 1); // for n = 255, n.(n+1).(2n+1) / 6 = 5559680 // Q8 means 1/256 precision. assert_eq!(vs[0], 5561664.5); let cuda_storage = dequantize_mul_mat_vec( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* ncols */ ncols, /* nrows */ 1, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); assert_eq!(vs.len(), 1); assert_eq!(vs[0], 5561851.0); Ok(()) } #[test] fn cuda_mm_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let ncols = 256; let vs: Vec<f32> = (0..ncols * 4).map(|v| v as f32 / 4.).collect(); let y = dev.htod_sync_copy(&vs).w()?; let mut xs = QCudaStorage::zeros(&dev, ncols * 4, GgmlDType::Q4_0)?; xs.quantize(&CudaStorage::wrap_cuda_slice(y.clone(), dev.clone()))?; let cuda_storage = mul_mat_via_q8_1( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* x_rows */ 4, /* x_cols */ ncols, /* y_rows */ ncols, /* y_cols */ 4, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); /* x = torch.tensor([float(v) for v in range(1024)]).reshape(4, 256) x @ x.t() / 16 tensor([[ 347480.0000, 869720.0000, 1391960.0000, 1914200.0000], [ 869720.0000, 2440536.0000, 4011352.0000, 5582166.5000], [ 1391960.0000, 4011352.0000, 6630742.0000, 9250132.0000], [ 1914200.0000, 5582166.5000, 9250132.0000, 12918099.0000]]) */ assert_eq!(vs.len(), 16); assert_eq!(vs[0], 347604.0); assert_eq!(vs[1], 888153.06); assert_eq!(vs[4], 869780.7); assert_eq!(vs[5], 2483145.0); assert_eq!(vs[11], 9407368.0); assert_eq!(vs[14], 9470856.0); assert_eq!(vs[15], 13138824.0); Ok(()) } }
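// The module below is not part of the original file: it is an illustrative sketch of a
// quantize/dequantize round trip through `QCudaStorage`, written in the style of the
// tests above. The element count, the Q8_0 dtype and the error tolerance are arbitrary
// assumptions; like the other tests here it needs an actual CUDA device to run.
#[cfg(test)]
mod roundtrip_example {
    use super::*;

    #[test]
    fn cuda_quantize_dequantize_roundtrip() -> Result<()> {
        let dev = CudaDevice::new(0)?;
        let el = 256;
        let vs: Vec<f32> = (0..el).map(|v| v as f32).collect();
        let y = dev.htod_sync_copy(&vs).w()?;
        // Quantize to Q8_0 (roughly 1/256 relative precision per 32-value block)...
        let mut xs = QCudaStorage::zeros(&dev, el, GgmlDType::Q8_0)?;
        xs.quantize(&CudaStorage::wrap_cuda_slice(y, dev.clone()))?;
        // ...and dequantize back to f32 on the GPU.
        let storage = xs.dequantize(el)?;
        let back = dev
            .dtoh_sync_copy(&storage.as_cuda_slice::<f32>()?.slice(..))
            .unwrap();
        assert_eq!(back.len(), el);
        // Zero is representable exactly.
        assert_eq!(back[0], 0.0);
        // The largest value should be recovered up to the quantization error.
        assert!((back[el - 1] - 255.0).abs() < 2.0);
        Ok(())
    }
}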
candle/candle-core/src/quantized/cuda.rs/0
{ "file_path": "candle/candle-core/src/quantized/cuda.rs", "repo_id": "candle", "token_count": 13702 }
63
use crate::Layout;

/// An iterator over offset positions for items of an N-dimensional array stored in a
/// flat buffer using some potential strides.
#[derive(Debug)]
pub struct StridedIndex<'a> {
    next_storage_index: Option<usize>,
    multi_index: Vec<usize>,
    dims: &'a [usize],
    stride: &'a [usize],
}

impl<'a> StridedIndex<'a> {
    pub(crate) fn new(dims: &'a [usize], stride: &'a [usize], start_offset: usize) -> Self {
        let elem_count: usize = dims.iter().product();
        let next_storage_index = if elem_count == 0 {
            None
        } else {
            // This applies to the scalar case.
            Some(start_offset)
        };
        StridedIndex {
            next_storage_index,
            multi_index: vec![0; dims.len()],
            dims,
            stride,
        }
    }

    pub(crate) fn from_layout(l: &'a Layout) -> Self {
        Self::new(l.dims(), l.stride(), l.start_offset())
    }
}

impl<'a> Iterator for StridedIndex<'a> {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        let storage_index = match self.next_storage_index {
            None => return None,
            Some(storage_index) => storage_index,
        };
        let mut updated = false;
        let mut next_storage_index = storage_index;
        for ((multi_i, max_i), stride_i) in self
            .multi_index
            .iter_mut()
            .zip(self.dims.iter())
            .zip(self.stride.iter())
            .rev()
        {
            let next_i = *multi_i + 1;
            if next_i < *max_i {
                *multi_i = next_i;
                updated = true;
                next_storage_index += stride_i;
                break;
            } else {
                next_storage_index -= *multi_i * stride_i;
                *multi_i = 0
            }
        }
        self.next_storage_index = if updated {
            Some(next_storage_index)
        } else {
            None
        };
        Some(storage_index)
    }
}

#[derive(Debug)]
pub enum StridedBlocks<'a> {
    SingleBlock {
        start_offset: usize,
        len: usize,
    },
    MultipleBlocks {
        block_start_index: StridedIndex<'a>,
        block_len: usize,
    },
}
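// The module below is not part of the original file: it is an illustrative sketch of
// the iteration order produced by `StridedIndex` for a small 2x3 array, covering both a
// contiguous layout and a transposed view of the same buffer.
#[cfg(test)]
mod strided_index_example {
    use super::StridedIndex;

    #[test]
    fn contiguous_and_transposed_2x3() {
        // Row-major (contiguous) layout: dims [2, 3] with strides [3, 1].
        let offsets: Vec<usize> = StridedIndex::new(&[2, 3], &[3, 1], 0).collect();
        assert_eq!(offsets, vec![0, 1, 2, 3, 4, 5]);

        // A transposed view of the same buffer: dims [3, 2] with strides [1, 3].
        let offsets: Vec<usize> = StridedIndex::new(&[3, 2], &[1, 3], 0).collect();
        assert_eq!(offsets, vec![0, 3, 1, 4, 2, 5]);
    }
}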
candle/candle-core/src/strided_index.rs/0
{ "file_path": "candle/candle-core/src/strided_index.rs", "repo_id": "candle", "token_count": 1148 }
64
import torch
from collections import OrderedDict

# Create a trivial tensor and wrap it in an ordered dict
a = torch.tensor([[1,2,3,4], [5,6,7,8]])
o = OrderedDict()
o["test"] = a

# Write a trivial tensor to a pt file
torch.save(o, "test.pt")

############################################################################################################
# Write a trivial tensor to a pt file with a key
torch.save({"model_state_dict": o}, "test_with_key.pt")

############################################################################################################
# Create a tensor with fortran contiguous memory layout
import numpy as np

# Step 1: Create a 3D NumPy array with Fortran order using a range of numbers
# For example, creating a 2x3x4 array
array_fortran = np.asfortranarray(np.arange(1, 2*3*4 + 1).reshape(2, 3, 4))

# Verify the memory order
print("Is Fortran contiguous (F order):", array_fortran.flags['F_CONTIGUOUS'])  # Should be True
print("Is C contiguous (C order):", array_fortran.flags['C_CONTIGUOUS'])  # Should be False

# Step 2: Convert the NumPy array to a PyTorch tensor
tensor_fortran = torch.from_numpy(array_fortran)

# Verify the tensor layout
print("Tensor stride:", tensor_fortran.stride())  # Stride will reflect the Fortran memory layout

# Step 3: Save the PyTorch tensor to a .pth file
torch.save({"tensor_fortran": tensor_fortran}, 'fortran_tensor_3d.pth')

print("3D Tensor saved with Fortran layout.")
candle/candle-core/tests/pth.py/0
{ "file_path": "candle/candle-core/tests/pth.py", "repo_id": "candle", "token_count": 441 }
65
//! The CIFAR-10 dataset. //! //! The files can be downloaded from the following page: //! <https://www.cs.toronto.edu/~kriz/cifar.html> //! The binary version of the dataset is used. use crate::vision::Dataset; use candle::{DType, Device, Error, Result, Tensor}; use hf_hub::{api::sync::Api, Repo, RepoType}; use parquet::file::reader::{FileReader, SerializedFileReader}; use std::fs::File; use std::io::{BufReader, Read}; const W: usize = 32; const H: usize = 32; const C: usize = 3; const BYTES_PER_IMAGE: usize = W * H * C + 1; const SAMPLES_PER_FILE: usize = 10000; fn read_file(filename: &std::path::Path) -> Result<(Tensor, Tensor)> { let mut buf_reader = BufReader::new(File::open(filename)?); let mut data = vec![0u8; SAMPLES_PER_FILE * BYTES_PER_IMAGE]; buf_reader.read_exact(&mut data)?; let mut images = vec![]; let mut labels = vec![]; for index in 0..SAMPLES_PER_FILE { let content_offset = BYTES_PER_IMAGE * index; labels.push(data[content_offset]); images.push(&data[1 + content_offset..content_offset + BYTES_PER_IMAGE]); } let images: Vec<u8> = images .iter() .copied() .flatten() .copied() .collect::<Vec<_>>(); let labels = Tensor::from_vec(labels, SAMPLES_PER_FILE, &Device::Cpu)?; let images = Tensor::from_vec(images, (SAMPLES_PER_FILE, C, H, W), &Device::Cpu)?; let images = (images.to_dtype(DType::F32)? / 255.)?; Ok((images, labels)) } pub fn load_dir<T: AsRef<std::path::Path>>(dir: T) -> Result<Dataset> { let dir = dir.as_ref(); let (test_images, test_labels) = read_file(&dir.join("test_batch.bin"))?; let train_images_and_labels = [ "data_batch_1.bin", "data_batch_2.bin", "data_batch_3.bin", "data_batch_4.bin", "data_batch_5.bin", ] .iter() .map(|x| read_file(&dir.join(x))) .collect::<Result<Vec<_>>>()?; let (train_images, train_labels): (Vec<_>, Vec<_>) = train_images_and_labels.into_iter().unzip(); Ok(Dataset { train_images: Tensor::cat(&train_images, 0)?, train_labels: Tensor::cat(&train_labels, 0)?, test_images, test_labels, labels: 10, }) } fn load_parquet(parquet: SerializedFileReader<std::fs::File>) -> Result<(Tensor, Tensor)> { let samples = parquet.metadata().file_metadata().num_rows() as usize; let mut buffer_images: Vec<u8> = Vec::with_capacity(samples * 1_024); let mut buffer_labels: Vec<u8> = Vec::with_capacity(samples); for row in parquet.into_iter().flatten() { for (_name, field) in row.get_column_iter() { if let parquet::record::Field::Group(subrow) = field { for (_name, field) in subrow.get_column_iter() { if let parquet::record::Field::Bytes(value) = field { let image = image::load_from_memory(value.data()).unwrap(); buffer_images.extend(image.to_rgb8().as_raw()); } } } else if let parquet::record::Field::Long(label) = field { buffer_labels.push(*label as u8); } } } let images = (Tensor::from_vec(buffer_images, (samples, 3, 32, 32), &Device::Cpu)? .to_dtype(DType::U8)? 
/ 255.)?; let labels = Tensor::from_vec(buffer_labels, (samples,), &Device::Cpu)?; Ok((images, labels)) } pub fn load() -> Result<Dataset> { let api = Api::new().map_err(|e| Error::Msg(format!("Api error: {e}")))?; let dataset_id = "cifar10".to_string(); let repo = Repo::with_revision( dataset_id, RepoType::Dataset, "refs/convert/parquet".to_string(), ); let repo = api.repo(repo); let test_parquet_filename = repo .get("plain_text/test/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let train_parquet_filename = repo .get("plain_text/train/0000.parquet") .map_err(|e| Error::Msg(format!("Api error: {e}")))?; let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?) .map_err(|e| Error::Msg(format!("Parquet error: {e}")))?; let (test_images, test_labels) = load_parquet(test_parquet)?; let (train_images, train_labels) = load_parquet(train_parquet)?; Ok(crate::vision::Dataset { train_images, train_labels, test_images, test_labels, labels: 10, }) }
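// The module below is not part of the original file: it is an illustrative sketch of
// loading the dataset from the Hugging Face Hub and inspecting the resulting tensors.
// It is marked `#[ignore]` because it needs network access to fetch the parquet files;
// the field and dimension checks reflect the layout built by `load_parquet` above.
#[cfg(test)]
mod cifar_example {
    #[test]
    #[ignore] // downloads the CIFAR-10 parquet files from the Hugging Face Hub
    fn load_from_hub() -> candle::Result<()> {
        let ds = super::load()?;
        assert_eq!(ds.labels, 10);
        // Images are stored as (samples, channels, height, width).
        let (_samples, c, h, w) = ds.train_images.dims4()?;
        assert_eq!((c, h, w), (3, 32, 32));
        Ok(())
    }
}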
candle/candle-datasets/src/vision/cifar.rs/0
{ "file_path": "candle/candle-datasets/src/vision/cifar.rs", "repo_id": "candle", "token_count": 2139 }
66
# candle-convnext

[A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) and
[ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808).

This candle implementation uses a pre-trained ConvNeXt network for inference. The
classification head has been trained on the ImageNet dataset and the example prints
the probabilities of the five most likely classes.

## Running an example

```
$ cargo run --example convnext --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which tiny
loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 84.09%
bicycle-built-for-two, tandem bicycle, tandem: 4.15%
maillot                 : 0.74%
crash helmet            : 0.54%
unicycle, monocycle     : 0.44%
```
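The printed percentages come from a softmax over the classifier logits followed
by a top-5 selection. A minimal sketch of that post-processing step, mirroring
the resnet example elsewhere in this repository (building the model and loading
the 224x224 image are elided here), could look like this:

```rust
use candle::{IndexOp, Result, Tensor, D};

/// Print the top-5 ImageNet classes for a single-image batch of logits.
fn print_top5(logits: &Tensor) -> Result<()> {
    let prs = candle_nn::ops::softmax(logits, D::Minus1)?
        .i(0)?
        .to_vec1::<f32>()?;
    let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
    prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
    for &(class_idx, pr) in prs.iter().take(5) {
        println!(
            "{:24}: {:.2}%",
            candle_examples::imagenet::CLASSES[class_idx],
            100. * pr
        );
    }
    Ok(())
}
```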
candle/candle-examples/examples/convnext/README.md/0
{ "file_path": "candle/candle-examples/examples/convnext/README.md", "repo_id": "candle", "token_count": 293 }
67
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle::{DType, IndexOp, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::encodec::{Config, Model}; use clap::{Parser, ValueEnum}; use hf_hub::api::sync::Api; mod audio_io; #[derive(Clone, Debug, Copy, PartialEq, Eq, ValueEnum)] enum Action { AudioToAudio, AudioToCode, CodeToAudio, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// The action to be performed, specifies the format for the input and output data. action: Action, /// The input file, either an audio file or some encodec tokens stored as safetensors. in_file: String, /// The output file, either a wave audio file or some encodec tokens stored as safetensors. out_file: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The model weight file, in safetensor format. #[arg(long)] model: Option<String>, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => Api::new()? .model("facebook/encodec_24khz".to_string()) .get("model.safetensors")?, }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let config = Config::default(); let model = Model::new(&config, vb)?; let codes = match args.action { Action::CodeToAudio => { let codes = candle::safetensors::load(args.in_file, &device)?; codes.get("codes").expect("no codes in input file").clone() } Action::AudioToCode | Action::AudioToAudio => { let pcm = if args.in_file == "-" { println!(">>>> RECORDING AUDIO, PRESS ENTER ONCE DONE <<<<"); let (stream, input_audio) = audio_io::setup_input_stream()?; let mut pcms = vec![]; let stdin = std::thread::spawn(|| { let mut s = String::new(); std::io::stdin().read_line(&mut s) }); while !stdin.is_finished() { let input = input_audio.lock().unwrap().take_all(); if input.is_empty() { std::thread::sleep(std::time::Duration::from_millis(100)); continue; } pcms.push(input) } drop(stream); pcms.concat() } else { let (pcm, sample_rate) = audio_io::pcm_decode(args.in_file)?; if sample_rate != 24_000 { println!("WARNING: encodec uses a 24khz sample rate, input uses {sample_rate}, resampling..."); audio_io::resample(&pcm, sample_rate as usize, 24_000)? } else { pcm } }; let pcm_len = pcm.len(); let pcm = Tensor::from_vec(pcm, (1, 1, pcm_len), &device)?; println!("input pcm shape: {:?}", pcm.shape()); model.encode(&pcm)? } }; println!("codes shape: {:?}", codes.shape()); match args.action { Action::AudioToCode => { codes.save_safetensors("codes", &args.out_file)?; } Action::AudioToAudio | Action::CodeToAudio => { let pcm = model.decode(&codes)?; println!("output pcm shape: {:?}", pcm.shape()); let pcm = pcm.i(0)?.i(0)?; let pcm = candle_examples::audio::normalize_loudness(&pcm, 24_000, true)?; let pcm = pcm.to_vec1::<f32>()?; if args.out_file == "-" { let (stream, ad) = audio_io::setup_output_stream()?; { let mut ad = ad.lock().unwrap(); ad.push_samples(&pcm)?; } loop { let ad = ad.lock().unwrap(); if ad.is_empty() { break; } // That's very weird, calling thread::sleep here triggers the stream to stop // playing (the callback doesn't seem to be called anymore). 
// std::thread::sleep(std::time::Duration::from_millis(100)); } drop(stream) } else { let mut output = std::fs::File::create(&args.out_file)?; candle_examples::wav::write_pcm_as_wav(&mut output, &pcm, 24_000)?; } } } Ok(()) }
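// Example invocations (sketch, using the clap arguments defined above; audio
// decoding/playback may require enabling extra cargo features such as the
// symphonia/cpal based ones used by `audio_io`):
//
//   cargo run --example encodec --release -- audio-to-code input.wav codes.safetensors
//   cargo run --example encodec --release -- code-to-audio codes.safetensors roundtrip.wav
//
// Passing "-" as the input records from the microphone, and "-" as the output
// plays the decoded audio directly, as handled in `main` above.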
candle/candle-examples/examples/encodec/main.rs/0
{ "file_path": "candle/candle-examples/examples/encodec/main.rs", "repo_id": "candle", "token_count": 2395 }
68
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::{Parser, ValueEnum}; use candle_transformers::models::mamba::{Config, Model, State}; use candle::{DType, Device, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; struct TextGeneration { model: Model, config: Config, device: Device, tokenizer: TokenOutputStream, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { #[allow(clippy::too_many_arguments)] fn new( model: Model, config: Config, tokenizer: Tokenizer, seed: u64, temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, temp, top_p); Self { model, config, tokenizer: TokenOutputStream::new(tokenizer), logits_processor, repeat_penalty, repeat_last_n, device: device.clone(), } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { use std::io::Write; self.tokenizer.clear(); let dtype = self.model.dtype(); let mut tokens = self .tokenizer .tokenizer() .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut generated_tokens = 0usize; let eos_token = match self.tokenizer.get_token("<|endoftext|>") { Some(token) => token, None => anyhow::bail!("cannot find the </s> token"), }; let mut state = State::new(1, &self.config, dtype, &self.device)?; let mut next_logits = None; for &t in tokens.iter() { let input = Tensor::new(&[t], &self.device)?; let logits = self.model.forward(&input, &mut state)?; next_logits = Some(logits); if let Some(t) = self.tokenizer.next_token(t)? { print!("{t}") } } std::io::stdout().flush()?; let start_gen = std::time::Instant::now(); for _ in 0..sample_len { let logits = match next_logits.as_ref() { Some(logits) => logits, None => anyhow::bail!("cannot work on an empty prompt"), }; let logits = logits.squeeze(0)?.to_dtype(dtype)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); generated_tokens += 1; if next_token == eos_token { break; } if let Some(t) = self.tokenizer.next_token(next_token)? { print!("{t}"); std::io::stdout().flush()?; } let input = Tensor::new(&[next_token], &self.device)?; next_logits = Some(self.model.forward(&input, &mut state)?) } let dt = start_gen.elapsed(); if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? 
{ print!("{rest}"); } std::io::stdout().flush()?; println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Parser, ValueEnum, Clone, Copy, PartialEq, Eq, Debug)] enum Which { Mamba130m, Mamba370m, Mamba790m, Mamba1_4b, Mamba2_8b, Mamba2_8bSlimPj, } impl std::fmt::Display for Which { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", self) } } impl Which { fn model_id(&self) -> &'static str { match self { Self::Mamba130m => "state-spaces/mamba-130m", Self::Mamba370m => "state-spaces/mamba-370m", Self::Mamba790m => "state-spaces/mamba-790m", Self::Mamba1_4b => "state-spaces/mamba-1.4b", Self::Mamba2_8b => "state-spaces/mamba-2.8b", Self::Mamba2_8bSlimPj => "state-spaces/mamba-2.8b-slimpj'", } } fn revision(&self) -> &'static str { match self { Self::Mamba130m | Self::Mamba370m | Self::Mamba790m | Self::Mamba1_4b | Self::Mamba2_8bSlimPj => "refs/pr/1", Self::Mamba2_8b => "refs/pr/4", } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] prompt: String, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long, default_value = "mamba130m")] which: Which, #[arg(long)] model_id: Option<String>, #[arg(long)] revision: Option<String>, #[arg(long)] tokenizer_file: Option<String>, #[arg(long)] weight_files: Option<String>, #[arg(long)] config_file: Option<String>, #[arg(long, default_value = "f32")] dtype: String, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { use std::str::FromStr; use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id .unwrap_or_else(|| args.which.model_id().to_string()), RepoType::Model, args.revision .unwrap_or_else(|| args.which.revision().to_string()), )); let tokenizer_filename = match args.tokenizer_file { Some(file) => std::path::PathBuf::from(file), None => api .model("EleutherAI/gpt-neox-20b".to_string()) .get("tokenizer.json")?, }; let config_filename = match args.config_file { Some(file) => std::path::PathBuf::from(file), None => repo.get("config.json")?, }; let filenames = match args.weight_files { Some(files) => files .split(',') .map(std::path::PathBuf::from) .collect::<Vec<_>>(), None => { vec![repo.get("model.safetensors")?] } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config: Config = serde_json::from_slice(&std::fs::read(config_filename)?)?; let device = candle_examples::device(args.cpu)?; let dtype = DType::from_str(&args.dtype)?; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let model = Model::new(&config, vb.pp("backbone"))?; println!("loaded the model in {:?}", start.elapsed()); let mut pipeline = TextGeneration::new( model, config, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, &device, ); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
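// Example invocation (sketch, using the clap arguments defined above):
//
//   cargo run --example mamba --release -- \
//       --which mamba370m --prompt "Mamba is the" --sample-len 100
//
// Weights are fetched from the state-spaces hub repos and the tokenizer from
// EleutherAI/gpt-neox-20b, as set up in `main` above.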
candle/candle-examples/examples/mamba/main.rs/0
{ "file_path": "candle/candle-examples/examples/mamba/main.rs", "repo_id": "candle", "token_count": 4425 }
69
use candle::{DType, Device, Result, Tensor, D}; use candle_nn::{ embedding, layer_norm, linear_no_bias, Activation, Embedding, LayerNorm, Linear, Module, VarBuilder, }; use candle_transformers::models::{encodec, t5}; // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/models/musicgen/configuration_musicgen.py#L83 #[derive(Debug, Clone, PartialEq)] pub struct Config { vocab_size: usize, max_position_embeddings: usize, num_hidden_layers: usize, ffn_dim: usize, num_attention_heads: usize, layerdrop: f64, use_cache: bool, activation_function: Activation, hidden_size: usize, dropout: f64, attention_dropout: f64, activation_dropout: f64, initializer_factor: f64, scale_embedding: bool, num_codebooks: usize, pad_token_id: usize, bos_token_id: usize, eos_token_id: Option<usize>, tie_word_embeddings: bool, } impl Default for Config { fn default() -> Self { Self { vocab_size: 2048, max_position_embeddings: 2048, num_hidden_layers: 24, ffn_dim: 4096, num_attention_heads: 16, layerdrop: 0.0, use_cache: true, activation_function: Activation::Gelu, hidden_size: 1024, dropout: 0.1, attention_dropout: 0.0, activation_dropout: 0.0, initializer_factor: 0.02, scale_embedding: false, num_codebooks: 4, pad_token_id: 2048, bos_token_id: 2048, eos_token_id: None, tie_word_embeddings: false, } } } impl Config { fn musicgen_small() -> Self { Self { vocab_size: 2048, max_position_embeddings: 2048, num_hidden_layers: 24, ffn_dim: 4096, num_attention_heads: 16, layerdrop: 0.0, use_cache: true, activation_function: Activation::Gelu, hidden_size: 1024, dropout: 0.1, attention_dropout: 0.0, activation_dropout: 0.0, initializer_factor: 0.02, scale_embedding: false, num_codebooks: 4, pad_token_id: 2048, bos_token_id: 2048, eos_token_id: None, tie_word_embeddings: false, } } } fn get_embedding(num_embeddings: usize, embedding_dim: usize) -> Result<Tensor> { let half_dim = embedding_dim / 2; let emb = f64::ln(10000.) / (half_dim - 1) as f64; let xs: Vec<_> = (0..num_embeddings).map(|v| v as f32).collect(); let xs = Tensor::from_vec(xs, (num_embeddings, 1), &Device::Cpu)?; let ys: Vec<_> = (0..half_dim) .map(|v| f64::exp(v as f64 * -emb) as f32) .collect(); let ys = Tensor::from_vec(ys, (1, half_dim), &Device::Cpu)?; let shape = (num_embeddings, half_dim); let emb = (xs.broadcast_as(shape)? * ys.broadcast_as(shape)?)?; let emb = Tensor::cat(&[&emb.cos()?, &emb.sin()?], 1)?.reshape((num_embeddings, 2 * half_dim))?; let emb = if embedding_dim % 2 == 1 { let zeros = Tensor::zeros((num_embeddings, 1), DType::F32, &Device::Cpu)?; Tensor::cat(&[&emb, &zeros], 1)? } else { emb }; Ok(emb) } #[derive(Debug)] struct MusicgenSinusoidalPositionalEmbedding { num_positions: usize, embedding_dim: usize, weights: Tensor, } impl MusicgenSinusoidalPositionalEmbedding { fn load(_vb: VarBuilder, cfg: &Config) -> Result<Self> { let num_positions = cfg.max_position_embeddings; let embedding_dim = cfg.hidden_size; let weights = get_embedding(num_positions, embedding_dim)?; Ok(Self { num_positions, embedding_dim, weights, }) } fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let (_b_sz, _codebooks, seq_len) = input_ids.dims3()?; if seq_len > self.weights.dim(0)? { self.weights = get_embedding(seq_len, self.embedding_dim)? 
} self.weights.narrow(0, 0, seq_len) } } #[derive(Debug)] struct MusicgenAttention { scaling: f64, is_decoder: bool, num_heads: usize, head_dim: usize, k_proj: Linear, v_proj: Linear, q_proj: Linear, out_proj: Linear, } impl MusicgenAttention { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = h / num_heads; let k_proj = linear_no_bias(h, h, vb.pp("k_proj"))?; let v_proj = linear_no_bias(h, h, vb.pp("v_proj"))?; let q_proj = linear_no_bias(h, h, vb.pp("q_proj"))?; let out_proj = linear_no_bias(h, h, vb.pp("out_proj"))?; Ok(Self { scaling: 1. / (head_dim as f64).sqrt(), is_decoder: true, num_heads, head_dim, k_proj, v_proj, q_proj, out_proj, }) } fn forward( &mut self, xs: &Tensor, kv_states: Option<&Tensor>, attention_mask: &Tensor, ) -> Result<Tensor> { let (b_sz, tgt_len, _) = xs.dims3()?; let query_states = (self.q_proj.forward(xs)? * self.scaling)?; let kv_states = kv_states.unwrap_or(xs); let key_states = self.k_proj.forward(kv_states)?; let value_states = self.v_proj.forward(kv_states)?; let tgt = (b_sz, tgt_len, self.num_heads, self.head_dim); let query_states = query_states.reshape(tgt)?.transpose(1, 2)?.contiguous()?; let key_states = key_states.reshape(tgt)?.transpose(1, 2)?.contiguous()?; let value_states = value_states.reshape(tgt)?.transpose(1, 2)?.contiguous()?; let src_len = key_states.dim(1)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let attn_weights = attn_weights .reshape((b_sz, self.num_heads, tgt_len, src_len))? .broadcast_add(attention_mask)?; let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?; // TODO: layer_head_mask? let attn_output = attn_weights .matmul(&value_states)? .reshape((b_sz, self.num_heads, tgt_len, self.head_dim))? .transpose(1, 2)? .reshape((b_sz, tgt_len, self.num_heads * self.head_dim))?; let attn_output = self.out_proj.forward(&attn_output)?; Ok(attn_output) } } #[derive(Debug)] struct MusicgenDecoderLayer { self_attn: MusicgenAttention, self_attn_layer_norm: LayerNorm, encoder_attn: MusicgenAttention, encoder_attn_layer_norm: LayerNorm, fc1: Linear, fc2: Linear, final_layer_norm: LayerNorm, activation_fn: Activation, } impl MusicgenDecoderLayer { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let self_attn = MusicgenAttention::load(vb.pp("self_attn"), cfg)?; let self_attn_layer_norm = layer_norm(h, 1e-5, vb.pp("self_attn_layer_norm"))?; let encoder_attn = MusicgenAttention::load(vb.pp("encoder_attn"), cfg)?; let encoder_attn_layer_norm = layer_norm(h, 1e-5, vb.pp("encoder_attn_layer_norm"))?; let fc1 = linear_no_bias(h, cfg.ffn_dim, vb.pp("fc1"))?; let fc2 = linear_no_bias(cfg.ffn_dim, h, vb.pp("fc2"))?; let final_layer_norm = layer_norm(h, 1e-5, vb.pp("final_layer_norm"))?; Ok(Self { self_attn, self_attn_layer_norm, encoder_attn, encoder_attn_layer_norm, fc1, fc2, final_layer_norm, activation_fn: cfg.activation_function, }) } fn forward( &mut self, xs: &Tensor, attention_mask: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let residual = xs.clone(); let xs = self.self_attn_layer_norm.forward(xs)?; let xs = self.self_attn.forward(&xs, None, attention_mask)?; let mut xs = (xs + residual)?; if let Some(encoder_hidden_states) = &encoder_hidden_states { let residual = xs.clone(); let encoder_attention_mask = attention_mask.clone(); // TODO xs = self.encoder_attn.forward( &xs, Some(encoder_hidden_states), &encoder_attention_mask, )?; xs = (xs + residual)? 
} let residual = xs.clone(); let xs = self.final_layer_norm.forward(&xs)?; let xs = self.fc1.forward(&xs)?; let xs = self.activation_fn.forward(&xs)?; let xs = self.fc2.forward(&xs)?; let xs = (xs + residual)?; Ok(xs) } } #[derive(Debug)] struct MusicgenDecoder { embed_tokens: Vec<Embedding>, embed_positions: MusicgenSinusoidalPositionalEmbedding, layers: Vec<MusicgenDecoderLayer>, layer_norm: LayerNorm, embed_scale: f64, num_codebooks: usize, d_model: usize, } impl MusicgenDecoder { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let embed_scale = if cfg.scale_embedding { (h as f64).sqrt() } else { 1. }; let embed_dim = cfg.vocab_size + 1; let embed_tokens = (0..cfg.num_codebooks) .map(|i| embedding(embed_dim, h, vb.pp(&format!("embed_tokens.{i}")))) .collect::<Result<Vec<_>>>()?; let embed_positions = MusicgenSinusoidalPositionalEmbedding::load(vb.clone(), cfg)?; let layers = (0..cfg.num_hidden_layers) .map(|i| MusicgenDecoderLayer::load(vb.pp(&format!("layers.{i}")), cfg)) .collect::<Result<Vec<_>>>()?; let layer_norm = layer_norm(h, 1e-5, vb.pp("layer_norm"))?; Ok(Self { embed_tokens, embed_positions, layers, layer_norm, embed_scale, num_codebooks: cfg.num_codebooks, d_model: cfg.hidden_size, }) } fn prepare_decoder_attention_mask(&self, _b_sz: usize, _seq_len: usize) -> Result<Tensor> { todo!() } fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let dev = input_ids.device(); let (b_sz_times_codebooks, seq_len) = input_ids.dims2()?; let b_sz = b_sz_times_codebooks / self.num_codebooks; let input = input_ids.reshape((b_sz, self.num_codebooks, seq_len))?; let mut inputs_embeds = Tensor::zeros((b_sz, seq_len, self.d_model), DType::F32, dev)?; for (idx, codebook) in self.embed_tokens.iter().enumerate() { let inp = input.narrow(1, idx, 1)?.squeeze(1)?; inputs_embeds = (inputs_embeds + codebook.forward(&inp)?)? 
} let inputs_embeds = inputs_embeds; let positions = self.embed_positions.forward(&input)?.to_device(dev)?; let mut xs = inputs_embeds.broadcast_add(&positions)?; let attention_mask = self.prepare_decoder_attention_mask(b_sz, seq_len)?; for decoder_layer in self.layers.iter_mut() { xs = decoder_layer.forward(&xs, &attention_mask, None)?; } let xs = self.layer_norm.forward(&xs)?; Ok(xs) } } #[derive(Debug)] pub struct MusicgenForCausalLM { decoder: MusicgenDecoder, lm_heads: Vec<Linear>, num_codebooks: usize, vocab_size: usize, } impl MusicgenForCausalLM { pub fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let decoder = MusicgenDecoder::load(vb.pp("model.decoder"), cfg)?; let lm_heads = (0..cfg.num_codebooks) .map(|i| linear_no_bias(h, cfg.vocab_size, vb.pp(&format!("lm_heads.{i}")))) .collect::<Result<Vec<_>>>()?; Ok(Self { decoder, lm_heads, num_codebooks: cfg.num_codebooks, vocab_size: cfg.vocab_size, }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let (b_sz, seq_len) = input_ids.dims2()?; let hidden_states = self.decoder.forward(input_ids)?; let lm_logits = self .lm_heads .iter() .map(|h| h.forward(&hidden_states)) .collect::<Result<Vec<_>>>()?; let lm_logits = Tensor::stack(&lm_logits, 1)?.reshape(( b_sz * self.num_codebooks, seq_len, self.vocab_size, ))?; Ok(lm_logits) } } #[derive(Debug)] pub struct MusicgenForConditionalGeneration { pub text_encoder: t5::T5EncoderModel, pub audio_encoder: encodec::Model, pub decoder: MusicgenForCausalLM, cfg: GenConfig, } #[derive(Debug, Clone, PartialEq)] pub struct GenConfig { musicgen: Config, t5: t5::Config, encodec: encodec::Config, } impl GenConfig { pub fn small() -> Self { // https://huggingface.co/facebook/musicgen-small/blob/495da4ad086b3416a27c6187f9239f9fd96f3962/config.json#L6 let encodec = encodec::Config { audio_channels: 1, chunk_length_s: None, codebook_dim: Some(128), codebook_size: 2048, compress: 2, dilation_growth_rate: 2, hidden_size: 128, kernel_size: 7, last_kernel_size: 7, norm_type: encodec::NormType::WeightNorm, normalize: false, num_filters: 64, num_lstm_layers: 2, num_residual_layers: 1, overlap: None, // This should be Reflect and not Replicate but Reflect does not work yet. pad_mode: encodec::PadMode::Replicate, residual_kernel_size: 3, sampling_rate: 32_000, target_bandwidths: vec![2.2], trim_right_ratio: 1.0, upsampling_ratios: vec![8, 5, 4, 4], use_causal_conv: false, use_conv_shortcut: false, }; Self { musicgen: Config::musicgen_small(), t5: t5::Config::musicgen_small(), encodec, } } } impl MusicgenForConditionalGeneration { pub fn config(&self) -> &GenConfig { &self.cfg } pub fn load(vb: VarBuilder, cfg: GenConfig) -> Result<Self> { let text_encoder = t5::T5EncoderModel::load(vb.pp("text_encoder"), &cfg.t5)?; let audio_encoder = encodec::Model::new(&cfg.encodec, vb.pp("audio_encoder"))?; let decoder = MusicgenForCausalLM::load(vb.pp("decoder"), &cfg.musicgen)?; Ok(Self { text_encoder, audio_encoder, decoder, cfg, }) } }
candle/candle-examples/examples/musicgen/musicgen_model.rs/0
{ "file_path": "candle/candle-examples/examples/musicgen/musicgen_model.rs", "repo_id": "candle", "token_count": 7592 }
70
# candle-recurrent-gemma

This example runs inference with the 2B base version of the RecurrentGemma
model; see the [Hugging Face model card](https://huggingface.co/google/recurrentgemma-2b)
for details on the model itself.

```bash
cargo run --features cuda -r --example recurrent-gemma -- \
  --prompt "Write me a poem about Machine Learning."
```
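Assuming the example follows the same device-selection helper as the other
candle examples, building it without the `cuda` feature falls back to running
on the CPU (expect this to be slow for a 2B-parameter model):

```bash
cargo run -r --example recurrent-gemma -- \
  --prompt "Write me a poem about Machine Learning."
```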
candle/candle-examples/examples/recurrent-gemma/README.md/0
{ "file_path": "candle/candle-examples/examples/recurrent-gemma/README.md", "repo_id": "candle", "token_count": 101 }
71
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::resnet; use clap::{Parser, ValueEnum}; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { #[value(name = "18")] Resnet18, #[value(name = "34")] Resnet34, #[value(name = "50")] Resnet50, #[value(name = "101")] Resnet101, #[value(name = "152")] Resnet152, } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Variant of the model to use. #[arg(value_enum, long, default_value_t = Which::Resnet18)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("lmz/candle-resnet".into()); let filename = match args.which { Which::Resnet18 => "resnet18.safetensors", Which::Resnet34 => "resnet34.safetensors", Which::Resnet50 => "resnet50.safetensors", Which::Resnet101 => "resnet101.safetensors", Which::Resnet152 => "resnet152.safetensors", }; api.get(filename)? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let class_count = candle_examples::imagenet::CLASS_COUNT as usize; let model = match args.which { Which::Resnet18 => resnet::resnet18(class_count, vb)?, Which::Resnet34 => resnet::resnet34(class_count, vb)?, Which::Resnet50 => resnet::resnet50(class_count, vb)?, Which::Resnet101 => resnet::resnet101(class_count, vb)?, Which::Resnet152 => resnet::resnet152(class_count, vb)?, }; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
candle/candle-examples/examples/resnet/main.rs/0
{ "file_path": "candle/candle-examples/examples/resnet/main.rs", "repo_id": "candle", "token_count": 1288 }
72
// https://github.com/openai/whisper/blob/main/whisper/model.py/rgs // TODO: // - Batch size greater than 1. // - More token filters (SuppressBlanks, ApplyTimestampRules). #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{Error as E, Result}; use candle::{Device, IndexOp, Tensor}; use candle_nn::{ops::softmax, VarBuilder}; use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::Api, Repo, RepoType}; use rand::{distributions::Distribution, SeedableRng}; use tokenizers::Tokenizer; mod multilingual; mod pcm_decode; use candle_transformers::models::whisper::{self as m, audio, Config}; pub enum Model { Normal(m::model::Whisper), Quantized(m::quantized_model::Whisper), } // Maybe we should use some traits rather than doing the dispatch for all these. impl Model { pub fn config(&self) -> &Config { match self { Self::Normal(m) => &m.config, Self::Quantized(m) => &m.config, } } pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.encoder.forward(x, flush), Self::Quantized(m) => m.encoder.forward(x, flush), } } pub fn decoder_forward( &mut self, x: &Tensor, xa: &Tensor, flush: bool, ) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.forward(x, xa, flush), Self::Quantized(m) => m.decoder.forward(x, xa, flush), } } pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.final_linear(x), Self::Quantized(m) => m.decoder.final_linear(x), } } } #[allow(dead_code)] #[derive(Debug, Clone)] struct DecodingResult { tokens: Vec<u32>, text: String, avg_logprob: f64, no_speech_prob: f64, temperature: f64, compression_ratio: f64, } #[allow(dead_code)] #[derive(Debug, Clone)] struct Segment { start: f64, duration: f64, dr: DecodingResult, } struct Decoder { model: Model, rng: rand::rngs::StdRng, task: Option<Task>, timestamps: bool, verbose: bool, tokenizer: Tokenizer, suppress_tokens: Tensor, sot_token: u32, transcribe_token: u32, translate_token: u32, eot_token: u32, no_speech_token: u32, no_timestamps_token: u32, language_token: Option<u32>, } impl Decoder { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, seed: u64, device: &Device, language_token: Option<u32>, task: Option<Task>, timestamps: bool, verbose: bool, ) -> Result<Self> { let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?; // Suppress the notimestamps token when in timestamps mode. 
// https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L452 let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32) .map(|i| { if model.config().suppress_tokens.contains(&i) || timestamps && i == no_timestamps_token { f32::NEG_INFINITY } else { 0f32 } }) .collect(); let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?; let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?; let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?; let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?; let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?; let no_speech_token = m::NO_SPEECH_TOKENS .iter() .find_map(|token| token_id(&tokenizer, token).ok()); let no_speech_token = match no_speech_token { None => anyhow::bail!("unable to find any non-speech token"), Some(n) => n, }; Ok(Self { model, rng: rand::rngs::StdRng::seed_from_u64(seed), tokenizer, task, timestamps, verbose, suppress_tokens, sot_token, transcribe_token, translate_token, eot_token, no_speech_token, language_token, no_timestamps_token, }) } fn decode(&mut self, mel: &Tensor, t: f64) -> Result<DecodingResult> { let model = &mut self.model; let audio_features = model.encoder_forward(mel, true)?; if self.verbose { println!("audio features: {:?}", audio_features.dims()); } let sample_len = model.config().max_target_positions / 2; let mut sum_logprob = 0f64; let mut no_speech_prob = f64::NAN; let mut tokens = vec![self.sot_token]; if let Some(language_token) = self.language_token { tokens.push(language_token); } match self.task { None | Some(Task::Transcribe) => tokens.push(self.transcribe_token), Some(Task::Translate) => tokens.push(self.translate_token), } if !self.timestamps { tokens.push(self.no_timestamps_token); } for i in 0..sample_len { let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?; // The model expects a batch dim but this inference loop does not handle // it so we add it at this point. let tokens_t = tokens_t.unsqueeze(0)?; let ys = model.decoder_forward(&tokens_t, &audio_features, i == 0)?; // Extract the no speech probability on the first iteration by looking at the first // token logits and the probability for the according token. if i == 0 { let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?; no_speech_prob = softmax(&logits, 0)? .i(self.no_speech_token as usize)? .to_scalar::<f32>()? as f64; } let (_, seq_len, _) = ys.dims3()?; let logits = model .decoder_final_linear(&ys.i((..1, seq_len - 1..))?)? .i(0)? .i(0)?; // TODO: Besides suppress tokens, we should apply the heuristics from // ApplyTimestampRules, i.e.: // - Timestamps come in pairs, except before EOT. // - Timestamps should be non-decreasing. // - If the sum of the probabilities of timestamps is higher than any other tokens, // only consider timestamps when sampling. // https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L439 let logits = logits.broadcast_add(&self.suppress_tokens)?; let next_token = if t > 0f64 { let prs = softmax(&(&logits / t)?, 0)?; let logits_v: Vec<f32> = prs.to_vec1()?; let distr = rand::distributions::WeightedIndex::new(&logits_v)?; distr.sample(&mut self.rng) as u32 } else { let logits_v: Vec<f32> = logits.to_vec1()?; logits_v .iter() .enumerate() .max_by(|(_, u), (_, v)| u.total_cmp(v)) .map(|(i, _)| i as u32) .unwrap() }; tokens.push(next_token); let prob = softmax(&logits, candle::D::Minus1)? .i(next_token as usize)? .to_scalar::<f32>()? 
as f64; if next_token == self.eot_token || tokens.len() > model.config().max_target_positions { break; } sum_logprob += prob.ln(); } let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?; let avg_logprob = sum_logprob / tokens.len() as f64; Ok(DecodingResult { tokens, text, avg_logprob, no_speech_prob, temperature: t, compression_ratio: f64::NAN, }) } fn decode_with_fallback(&mut self, segment: &Tensor) -> Result<DecodingResult> { for (i, &t) in m::TEMPERATURES.iter().enumerate() { let dr: Result<DecodingResult> = self.decode(segment, t); if i == m::TEMPERATURES.len() - 1 { return dr; } // On errors, we try again with a different temperature. match dr { Ok(dr) => { let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD || dr.avg_logprob < m::LOGPROB_THRESHOLD; if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD { return Ok(dr); } } Err(err) => { println!("Error running at {t}: {err}") } } } unreachable!() } fn run(&mut self, mel: &Tensor) -> Result<Vec<Segment>> { let (_, _, content_frames) = mel.dims3()?; let mut seek = 0; let mut segments = vec![]; while seek < content_frames { let start = std::time::Instant::now(); let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let segment_size = usize::min(content_frames - seek, m::N_FRAMES); let mel_segment = mel.narrow(2, seek, segment_size)?; let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let dr = self.decode_with_fallback(&mel_segment)?; seek += segment_size; if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD { println!("no speech detected, skipping {seek} {dr:?}"); continue; } let segment = Segment { start: time_offset, duration: segment_duration, dr, }; if self.timestamps { println!( "{:.1}s -- {:.1}s", segment.start, segment.start + segment.duration, ); let mut tokens_to_decode = vec![]; let mut prev_timestamp_s = 0f32; for &token in segment.dr.tokens.iter() { if token == self.sot_token || token == self.eot_token { continue; } // The no_timestamp_token is the last before the timestamp ones. 
if token > self.no_timestamps_token { let timestamp_s = (token - self.no_timestamps_token + 1) as f32 / 50.; if !tokens_to_decode.is_empty() { let text = self .tokenizer .decode(&tokens_to_decode, true) .map_err(E::msg)?; println!(" {:.1}s-{:.1}s: {}", prev_timestamp_s, timestamp_s, text); tokens_to_decode.clear() } prev_timestamp_s = timestamp_s; } else { tokens_to_decode.push(token) } } if !tokens_to_decode.is_empty() { let text = self .tokenizer .decode(&tokens_to_decode, true) .map_err(E::msg)?; if !text.is_empty() { println!(" {:.1}s-...: {}", prev_timestamp_s, text); } tokens_to_decode.clear() } } else { println!( "{:.1}s -- {:.1}s: {}", segment.start, segment.start + segment.duration, segment.dr.text, ) } if self.verbose { println!("{seek}: {segment:?}, in {:?}", start.elapsed()); } segments.push(segment) } Ok(segments) } } pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> { match tokenizer.token_to_id(token) { None => candle::bail!("no token-id for {token}"), Some(id) => Ok(id), } } #[derive(Clone, Copy, Debug, ValueEnum)] enum Task { Transcribe, Translate, } #[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)] enum WhichModel { Tiny, #[value(name = "tiny.en")] TinyEn, Base, #[value(name = "base.en")] BaseEn, Small, #[value(name = "small.en")] SmallEn, Medium, #[value(name = "medium.en")] MediumEn, Large, LargeV2, LargeV3, #[value(name = "distil-medium.en")] DistilMediumEn, #[value(name = "distil-large-v2")] DistilLargeV2, #[value(name = "distil-large-v3")] DistilLargeV3, } impl WhichModel { fn is_multilingual(&self) -> bool { match self { Self::Tiny | Self::Base | Self::Small | Self::Medium | Self::Large | Self::LargeV2 | Self::LargeV3 | Self::DistilLargeV2 | Self::DistilLargeV3 => true, Self::TinyEn | Self::BaseEn | Self::SmallEn | Self::MediumEn | Self::DistilMediumEn => { false } } } fn model_and_revision(&self) -> (&'static str, &'static str) { match self { Self::Tiny => ("openai/whisper-tiny", "main"), Self::TinyEn => ("openai/whisper-tiny.en", "refs/pr/15"), Self::Base => ("openai/whisper-base", "refs/pr/22"), Self::BaseEn => ("openai/whisper-base.en", "refs/pr/13"), Self::Small => ("openai/whisper-small", "main"), Self::SmallEn => ("openai/whisper-small.en", "refs/pr/10"), Self::Medium => ("openai/whisper-medium", "main"), Self::MediumEn => ("openai/whisper-medium.en", "main"), Self::Large => ("openai/whisper-large", "refs/pr/36"), Self::LargeV2 => ("openai/whisper-large-v2", "refs/pr/57"), Self::LargeV3 => ("openai/whisper-large-v3", "main"), Self::DistilMediumEn => ("distil-whisper/distil-medium.en", "main"), Self::DistilLargeV2 => ("distil-whisper/distil-large-v2", "main"), Self::DistilLargeV3 => ("distil-whisper/distil-large-v3", "main"), } } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] model_id: Option<String>, /// The model to use, check out available models: /// https://huggingface.co/models?search=whisper #[arg(long)] revision: Option<String>, /// The model to be used, can be tiny, small, medium. #[arg(long, default_value = "tiny.en")] model: WhichModel, /// The input to be processed, in wav format, will default to `jfk.wav`. Alternatively /// this can be set to sample:jfk, sample:gb1, ... to fetch a sample from the following /// repo: https://huggingface.co/datasets/Narsil/candle_demo/ #[arg(long)] input: Option<String>, /// The seed to use when generating random samples. 
#[arg(long, default_value_t = 299792458)] seed: u64, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, #[arg(long)] quantized: bool, /// Language. #[arg(long)] language: Option<String>, /// Task, when no task is specified, the input tokens contain only the sot token which can /// improve things when in no-timestamp mode. #[arg(long)] task: Option<Task>, /// Timestamps mode, this is not fully implemented yet. #[arg(long)] timestamps: bool, /// Print the full DecodingResult structure rather than just the text. #[arg(long)] verbose: bool, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let device = candle_examples::device(args.cpu)?; let (default_model, default_revision) = if args.quantized { ("lmz/candle-whisper", "main") } else { args.model.model_and_revision() }; let default_model = default_model.to_string(); let default_revision = default_revision.to_string(); let (model_id, revision) = match (args.model_id, args.revision) { (Some(model_id), Some(revision)) => (model_id, revision), (Some(model_id), None) => (model_id, "main".to_string()), (None, Some(revision)) => (default_model, revision), (None, None) => (default_model, default_revision), }; let (config_filename, tokenizer_filename, weights_filename, input) = { let api = Api::new()?; let dataset = api.dataset("Narsil/candle-examples".to_string()); let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let sample = if let Some(input) = args.input { if let Some(sample) = input.strip_prefix("sample:") { dataset.get(&format!("samples_{sample}.wav"))? } else { std::path::PathBuf::from(input) } } else { println!("No audio file submitted: Downloading https://huggingface.co/datasets/Narsil/candle_demo/blob/main/samples_jfk.wav"); dataset.get("samples_jfk.wav")? 
}; let (config, tokenizer, model) = if args.quantized { let ext = match args.model { WhichModel::TinyEn => "tiny-en", WhichModel::Tiny => "tiny", _ => unimplemented!("no quantized support for {:?}", args.model), }; ( repo.get(&format!("config-{ext}.json"))?, repo.get(&format!("tokenizer-{ext}.json"))?, repo.get(&format!("model-{ext}-q80.gguf"))?, ) } else { let config = repo.get("config.json")?; let tokenizer = repo.get("tokenizer.json")?; let model = repo.get("model.safetensors")?; (config, tokenizer, model) }; (config, tokenizer, model, sample) }; let config: Config = serde_json::from_str(&std::fs::read_to_string(config_filename)?)?; let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let mel_bytes = match config.num_mel_bins { 80 => include_bytes!("melfilters.bytes").as_slice(), 128 => include_bytes!("melfilters128.bytes").as_slice(), nmel => anyhow::bail!("unexpected num_mel_bins {nmel}"), }; let mut mel_filters = vec![0f32; mel_bytes.len() / 4]; <byteorder::LittleEndian as byteorder::ByteOrder>::read_f32_into(mel_bytes, &mut mel_filters); let (pcm_data, sample_rate) = pcm_decode::pcm_decode(input)?; if sample_rate != m::SAMPLE_RATE as u32 { anyhow::bail!("input file must have a {} sampling rate", m::SAMPLE_RATE) } println!("pcm data loaded {}", pcm_data.len()); let mel = audio::pcm_to_mel(&config, &pcm_data, &mel_filters); let mel_len = mel.len(); let mel = Tensor::from_vec( mel, (1, config.num_mel_bins, mel_len / config.num_mel_bins), &device, )?; println!("loaded mel: {:?}", mel.dims()); let mut model = if args.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &weights_filename, &device, )?; Model::Quantized(m::quantized_model::Whisper::load(&vb, config)?) } else { let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], m::DTYPE, &device)? }; Model::Normal(m::model::Whisper::load(&vb, config)?) }; let language_token = match (args.model.is_multilingual(), args.language) { (true, None) => Some(multilingual::detect_language(&mut model, &tokenizer, &mel)?), (false, None) => None, (true, Some(language)) => match token_id(&tokenizer, &format!("<|{language}|>")) { Ok(token_id) => Some(token_id), Err(_) => anyhow::bail!("language {language} is not supported"), }, (false, Some(_)) => { anyhow::bail!("a language cannot be set for non-multilingual models") } }; let mut dc = Decoder::new( model, tokenizer, args.seed, &device, language_token, args.task, args.timestamps, args.verbose, )?; dc.run(&mel)?; Ok(()) }
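// Example invocations (sketch, using the clap arguments defined above; the
// example may additionally need an audio-decoding cargo feature such as
// symphonia to be enabled):
//
//   // Transcribe the bundled JFK sample with the default tiny.en model.
//   cargo run --example whisper --release -- --input sample:jfk
//
//   // Multilingual model, translating French speech to English text.
//   cargo run --example whisper --release -- \
//       --model medium --language fr --task translate --input audio.wav
//
// Inputs must be 16 kHz wav files (m::SAMPLE_RATE); the `sample:` prefix
// fetches files from the Narsil/candle-examples dataset repo as shown in main.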
candle/candle-examples/examples/whisper/main.rs/0
{ "file_path": "candle/candle-examples/examples/whisper/main.rs", "repo_id": "candle", "token_count": 10793 }
73
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once namespace flash { //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool Varlen=true> struct BlockInfo { template<typename Params> __device__ BlockInfo(const Params &params, const int bidb) : sum_s_q(!Varlen || params.cu_seqlens_q == nullptr ? -1 : params.cu_seqlens_q[bidb]) , sum_s_k(!Varlen || params.cu_seqlens_k == nullptr || !params.is_seqlens_k_cumulative ? -1 : params.cu_seqlens_k[bidb]) , actual_seqlen_q(!Varlen || params.cu_seqlens_q == nullptr ? params.seqlen_q : params.cu_seqlens_q[bidb + 1] - sum_s_q) // If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb]. // Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K. , seqlen_k_cache(!Varlen || params.cu_seqlens_k == nullptr ? params.seqlen_k : (params.is_seqlens_k_cumulative ? params.cu_seqlens_k[bidb + 1] - sum_s_k : params.cu_seqlens_k[bidb])) , actual_seqlen_k(params.seqused_k ? params.seqused_k[bidb] : seqlen_k_cache + (params.knew_ptr == nullptr ? 0 : params.seqlen_knew)) { } template <typename index_t> inline __device__ index_t q_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const { return sum_s_q == -1 ? bidb * batch_stride : uint32_t(sum_s_q) * row_stride; } template <typename index_t> inline __device__ index_t k_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const { return sum_s_k == -1 ? bidb * batch_stride : uint32_t(sum_s_k) * row_stride; } const int sum_s_q; const int sum_s_k; const int actual_seqlen_q; // We have to have seqlen_k_cache declared before actual_seqlen_k, otherwise actual_seqlen_k is set to 0. const int seqlen_k_cache; const int actual_seqlen_k; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
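// Worked example for BlockInfo (sketch): with Varlen=true,
// cu_seqlens_q = {0, 17, 37} and is_seqlens_k_cumulative == true, batch index
// bidb = 1 gives sum_s_q = 17 and actual_seqlen_q = 37 - 17 = 20, and
// q_offset() returns 17 * row_stride: variable-length sequences are packed
// back to back along the row dimension rather than padded per batch entry.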
candle/candle-flash-attn/kernels/block_info.h/0
{ "file_path": "candle/candle-flash-attn/kernels/block_info.h", "repo_id": "candle", "token_count": 851 }
74
fn main() { println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-changed=src/compatibility.cuh"); println!("cargo:rerun-if-changed=src/cuda_utils.cuh"); println!("cargo:rerun-if-changed=src/binary_op_macros.cuh"); let builder = bindgen_cuda::Builder::default(); println!("cargo:info={builder:?}"); let bindings = builder.build_ptx().unwrap(); bindings.write("src/lib.rs").unwrap(); }
candle/candle-kernels/build.rs/0
{ "file_path": "candle/candle-kernels/build.rs", "repo_id": "candle", "token_count": 177 }
75
[package] name = "candle-metal-kernels" version = "0.5.1" edition = "2021" description = "Metal kernels for Candle" repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [dependencies] metal = { version = "0.27.0", features = ["mps"] } once_cell = "1.18.0" thiserror = "1" tracing = "0.1.37" [dev-dependencies] half = { version = "2.3.1", features = [ "num-traits", "use-intrinsics", "rand_distr", ] } rand = "0.8.5"
candle/candle-metal-kernels/Cargo.toml/0
{ "file_path": "candle/candle-metal-kernels/Cargo.toml", "repo_id": "candle", "token_count": 218 }
76
use metal::{Buffer, ComputeCommandEncoderRef, ComputePipelineState, MTLSize}; use std::ffi::c_void; /// Most kernels apply similarly across the tensors /// This creates a strategy that uses the maximum amount of threads per threadgroup (capped at the /// actual total buffer length). /// Then kernels can just do their op on their single point in the buffer. pub(crate) fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) { let size = length as u64; let width = std::cmp::min(pipeline.max_total_threads_per_threadgroup(), size); let count = (size + width - 1) / width; let thread_group_count = MTLSize { width: count, height: 1, depth: 1, }; let thread_group_size = MTLSize { width, height: 1, depth: 1, }; (thread_group_count, thread_group_size) } // https://github.com/ml-explore/mlx/blob/bddf23f175726a57f0e443cd45518c0757daa166/mlx/backend/metal/utils.h#L96 pub(crate) fn get_block_dims(dim0: u64, dim1: u64, dim2: u64) -> MTLSize { let mut pows0 = 0u64; let mut pows1 = 0u64; let mut pows2 = 0u64; let mut sum = 0u64; loop { let presum = sum; // Check all the pows if dim0 >= (1 << (pows0 + 1)) { pows0 += 1; sum += 1; } if sum == 10 { break; } if dim1 >= (1 << (pows1 + 1)) { pows1 += 1; sum += 1; } if sum == 10 { break; } if dim2 >= (1 << (pows2 + 1)) { pows2 += 1; sum += 1; } if sum == presum || sum == 10 { break; } } MTLSize { width: 1 << pows0, height: 1 << pows1, depth: 1 << pows2, } } pub(crate) fn set_param<P: EncoderParam>( encoder: &ComputeCommandEncoderRef, position: u64, data: P, ) { <P as EncoderParam>::set_param(encoder, position, data) } /// Helper functions to create the various objects on the compute command encoder /// on a single line. /// Prevents getting wrong some arguments number and mixing length and size in bytes. pub(crate) trait EncoderParam { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self); } macro_rules! 
primitive { ($type:ty) => { impl EncoderParam for $type { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_bytes( position, core::mem::size_of::<$type>() as u64, &data as *const $type as *const c_void, ); } } }; } primitive!(bool); primitive!(usize); primitive!(i32); primitive!(i64); primitive!(u32); primitive!(u64); primitive!(f32); pub struct BufferOffset<'a> { pub buffer: &'a Buffer, pub offset_in_bytes: usize, } impl<'a> BufferOffset<'a> { pub fn zero_offset(buffer: &'a Buffer) -> Self { Self { buffer, offset_in_bytes: 0, } } } impl<T> EncoderParam for &[T] { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_bytes( position, core::mem::size_of_val(data) as u64, data.as_ptr() as *const c_void, ); } } impl EncoderParam for &Buffer { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data), 0); } } impl EncoderParam for (&Buffer, usize) { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.0), data.1 as u64); } } impl<'a> EncoderParam for &BufferOffset<'a> { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.buffer), data.offset_in_bytes as u64); } } impl EncoderParam for &mut Buffer { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data), 0); } } impl EncoderParam for (&mut Buffer, usize) { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.0), data.1 as u64); } } #[macro_export] macro_rules! set_params { ($encoder:ident, ($($param:expr),+)) => ( let mut _index = 0; $( $crate::utils::set_param($encoder, _index, $param); _index += 1; )* ); }
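// Worked examples (sketch) for the helpers above:
// - `linear_split` with `max_total_threads_per_threadgroup() == 1024` and
//   `length == 4000` yields 4 threadgroups of 1024 threads each (the last
//   group is partially idle since 4 * 1024 > 4000).
// - `get_block_dims(100, 100, 100)` hands out its 10 available power-of-two
//   doublings round-robin across the three dims, returning (16, 8, 8),
//   i.e. 1024 threads per threadgroup.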
candle/candle-metal-kernels/src/utils.rs/0
{ "file_path": "candle/candle-metal-kernels/src/utils.rs", "repo_id": "candle", "token_count": 2129 }
77
//! Embedding Layer. use candle::{Result, Tensor}; #[derive(Clone, Debug)] pub struct Embedding { embeddings: Tensor, hidden_size: usize, } impl Embedding { pub fn new(embeddings: Tensor, hidden_size: usize) -> Self { Self { embeddings, hidden_size, } } pub fn embeddings(&self) -> &Tensor { &self.embeddings } /// Get the hidden size of the embedding matrix pub fn hidden_size(&self) -> usize { self.hidden_size } } impl crate::Module for Embedding { fn forward(&self, indexes: &Tensor) -> Result<Tensor> { let mut final_dims = indexes.dims().to_vec(); final_dims.push(self.hidden_size); let indexes = indexes.flatten_all()?; let values = self.embeddings.index_select(&indexes, 0)?; let values = values.reshape(final_dims)?; Ok(values) } } pub fn embedding(in_size: usize, out_size: usize, vb: crate::VarBuilder) -> Result<Embedding> { let embeddings = vb.get_with_hints( (in_size, out_size), "weight", crate::Init::Randn { mean: 0., stdev: 1., }, )?; Ok(Embedding::new(embeddings, out_size)) }
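// Example sketch: a 4 x 2 embedding table looked up with a (2, 2) batch of
// u32 indices yields a (2, 2, 2) output, one table row per index.
#[cfg(test)]
mod tests {
    use super::Embedding;
    use crate::Module;
    use candle::{Device, Result, Tensor};

    #[test]
    fn embedding_lookup() -> Result<()> {
        let weights = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.], [6., 7.]], &Device::Cpu)?;
        let emb = Embedding::new(weights, 2);
        let indexes = Tensor::new(&[[0u32, 2], [1, 3]], &Device::Cpu)?;
        let out = emb.forward(&indexes)?;
        assert_eq!(out.dims(), &[2, 2, 2]);
        assert_eq!(
            out.to_vec3::<f32>()?,
            [[[0f32, 1.], [4., 5.]], [[2., 3.], [6., 7.]]]
        );
        Ok(())
    }
}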
candle/candle-nn/src/embedding.rs/0
{ "file_path": "candle/candle-nn/src/embedding.rs", "repo_id": "candle", "token_count": 571 }
78
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle::{test_utils, DType, Device, Tensor}; use candle_nn::{batch_norm, BatchNorm, BatchNormConfig, VarBuilder, VarMap}; /* The test below has been generated using the following PyTorch code: import torch torch.manual_seed(19551105) m = torch.nn.BatchNorm2d(5, affine=False) input = torch.randn(2, 5, 3, 4) output = m(input) print(input.flatten()) print(output.flatten()) print(m.running_mean) print(m.running_var) */ #[test] fn batch_norm_test() -> Result<()> { let running_mean = Tensor::zeros(5, DType::F32, &Device::Cpu)?; let running_var = Tensor::ones(5, DType::F32, &Device::Cpu)?; let bn = BatchNorm::new_no_bias(5, running_mean.clone(), running_var.clone(), 1e-8)?; let input: [f32; 120] = [ -0.7493, -1.0410, 1.6977, -0.6579, 1.7982, -0.0087, 0.2812, -0.1190, 0.2908, -0.5975, -0.0278, -0.2138, -1.3130, -1.6048, -2.2028, 0.9452, 0.4002, 0.0831, 1.0004, 0.1860, 0.5004, 0.5539, 0.9991, -0.2540, -0.0703, -0.3752, -0.1096, -0.2374, 1.0258, -2.2208, -0.0257, 0.6073, -1.1627, -0.0964, -1.9718, 1.6577, 0.1931, -0.3692, -0.8011, 0.9059, 0.4797, 0.6521, -0.0165, -0.6683, -0.4148, 2.0649, -0.8276, 1.7947, -0.2061, 0.5812, -1.3598, 1.6192, 1.0466, -0.4423, 0.4202, 0.1749, 0.6969, 0.2616, -0.0369, -1.4951, -0.0814, -0.1877, 0.0267, 0.6150, 0.2402, -1.1440, -2.0068, 0.6032, -2.6639, 0.8260, 0.1085, -0.1693, 1.2805, 0.7654, -0.4930, 0.3770, 1.1309, 0.2303, 0.2949, -0.2634, -0.5225, 0.4269, 0.6341, 1.5736, 0.9827, -1.2499, 0.3509, -1.6243, -0.8123, 0.7634, -0.3047, 0.0143, -0.4032, 0.0537, 0.7022, 0.8405, -1.2221, -1.6847, -0.0714, -0.1608, 0.5579, -1.5858, 0.4617, -0.6480, 0.1332, 0.0419, -0.9784, 0.4173, 1.2313, -1.9046, -0.1656, 0.1259, 0.0763, 1.4252, -0.9115, -0.1093, -0.3100, -0.6734, -1.4357, 0.9205, ]; let input = Tensor::new(&input, &Device::Cpu)?.reshape((2, 5, 3, 4))?; let output = bn.forward_train(&input)?; assert_eq!(output.dims(), &[2, 5, 3, 4]); let output = output.flatten_all()?; assert_eq!( test_utils::to_vec1_round(&output, 4)?, &[ -0.6391, -0.9414, 1.8965, -0.5444, 2.0007, 0.1283, 0.4287, 0.014, 0.4387, -0.4818, 0.1085, -0.0842, -1.6809, -2.0057, -2.6714, 0.8328, 0.2262, -0.1268, 0.8943, -0.0123, 0.3377, 0.3973, 0.8928, -0.5021, 0.0861, -0.2324, 0.0451, -0.0884, 1.2311, -2.1603, 0.1327, 0.7939, -1.055, 0.0589, -1.9002, 1.8912, 0.2918, -0.3253, -0.7993, 1.0741, 0.6063, 0.7955, 0.0617, -0.6536, -0.3754, 2.3461, -0.8284, 2.0495, -0.201, 0.6476, -1.4446, 1.7665, 1.1493, -0.4556, 0.4741, 0.2097, 0.7723, 0.3031, -0.0186, -1.5905, 0.053, -0.0572, 0.165, 0.7746, 0.3862, -1.0481, -1.9422, 0.7624, -2.6231, 0.9933, 0.2498, -0.0381, 1.2061, 0.6327, -0.7681, 0.2004, 1.0396, 0.037, 0.109, -0.5125, -0.8009, 0.2559, 0.4865, 1.5324, 1.1861, -1.1461, 0.5261, -1.5372, -0.689, 0.957, -0.1587, 0.1745, -0.2616, 0.2156, 0.8931, 1.0375, -1.2614, -1.7691, 0.0015, -0.0966, 0.6921, -1.6605, 0.5866, -0.6313, 0.226, 0.1258, -0.9939, 0.5378, 1.3484, -2.0319, -0.1574, 0.1568, 0.1034, 1.5574, -0.9614, -0.0967, -0.313, -0.7047, -1.5264, 1.0134 ] ); let bn2 = BatchNorm::new( 5, running_mean, running_var, Tensor::new(&[0.5f32], &Device::Cpu)?.broadcast_as(5)?, Tensor::new(&[-1.5f32], &Device::Cpu)?.broadcast_as(5)?, 1e-8, )?; let output2 = bn2.forward_train(&input)?; assert_eq!(output2.dims(), &[2, 5, 3, 4]); let output2 = output2.flatten_all()?; let diff2 = ((output2 - (output * 0.5)?)? 
+ 1.5)?.sqr()?; let sum_diff2 = diff2.sum_keepdim(0)?; assert_eq!(test_utils::to_vec1_round(&sum_diff2, 4)?, &[0f32]); assert_eq!( test_utils::to_vec1_round(bn.running_mean(), 4)?, &[-0.0133, 0.0197, -0.0153, -0.0073, -0.0020] ); assert_eq!( test_utils::to_vec1_round(bn.running_var(), 4)?, &[0.9972, 0.9842, 0.9956, 0.9866, 0.9898] ); Ok(()) } // This test makes sure that we can train a batch norm layer using a VarMap. #[test] fn train_batch_norm() -> Result<()> { let vm = VarMap::new(); let vb = VarBuilder::from_varmap(&vm, DType::F32, &Device::Cpu); let bn = batch_norm(1, BatchNormConfig::default(), vb)?; // Get a copy of the original mean to ensure it is being updated. let original_mean = bn.running_mean().detach().copy()?; let var_map_mean = { vm.data() .lock() .unwrap() .get("running_mean") .unwrap() .clone() }; // Ensure the var map mean is the same as the running mean. assert_eq!( test_utils::to_vec1_round(bn.running_mean(), 4)?, test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?, ); // Train with a something guaranteed to be different from the running mean. let mean_plus_one = { let one = original_mean.ones_like()?; original_mean.add(&one)?.reshape((1, 1))? }; bn.forward_train(&mean_plus_one)?; // Assert that the running mean has been updated. assert_ne!( test_utils::to_vec1_round(bn.running_mean(), 4)?, test_utils::to_vec1_round(&original_mean, 4)?, ); // Assert that the var map mean has been updated. assert_eq!( test_utils::to_vec1_round(bn.running_mean(), 4)?, test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?, ); Ok(()) }
candle/candle-nn/tests/batch_norm.rs/0
{ "file_path": "candle/candle-nn/tests/batch_norm.rs", "repo_id": "candle", "token_count": 3126 }
79
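The batch-norm test above checks `forward_train` against reference values generated in PyTorch. For readers who only want the call sequence, here is a minimal, self-contained sketch using the same `candle`/`candle_nn` APIs the test exercises (crate names as aliased in that file); the shapes and eps value are illustrative only:

```rust
use anyhow::Result;
use candle::{DType, Device, Tensor};
use candle_nn::BatchNorm;

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // Running statistics start at mean 0 / variance 1, as in the test above.
    let running_mean = Tensor::zeros(3, DType::F32, &dev)?;
    let running_var = Tensor::ones(3, DType::F32, &dev)?;
    let bn = BatchNorm::new_no_bias(3, running_mean, running_var, 1e-5)?;
    // A (batch, channels, height, width) input. In training mode each channel is
    // normalized to (x - batch_mean) / sqrt(batch_var + eps) and the running
    // statistics are updated, which is what the assertions in the test verify.
    let xs = Tensor::randn(0f32, 1f32, (4, 3, 8, 8), &dev)?;
    let ys = bn.forward_train(&xs)?;
    println!("output dims: {:?}", ys.dims());
    println!("running mean: {}", bn.running_mean());
    Ok(())
}
```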
[package] name = "candle-pyo3" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [lib] name = "candle" crate-type = ["cdylib"] [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } candle-nn = { workspace = true } candle-onnx = { workspace = true, optional = true } half = { workspace = true } intel-mkl-src = { workspace = true, optional = true } pyo3 = { version = "0.21.0", features = ["extension-module", "abi3-py38"] } [build-dependencies] pyo3-build-config = "0.21" [features] default = [] accelerate = ["dep:accelerate-src", "candle/accelerate"] cuda = ["candle/cuda"] mkl = ["dep:intel-mkl-src","candle/mkl"] onnx = ["dep:candle-onnx"]
candle/candle-pyo3/Cargo.toml/0
{ "file_path": "candle/candle-pyo3/Cargo.toml", "repo_id": "candle", "token_count": 315 }
80
from candle import Tensor, QTensor, DType from typing import ( Dict, Tuple, Any, Optional, Union, Iterator, Set, overload, Mapping, TypeVar, List, ) from collections import OrderedDict, namedtuple TensorLike = Union[Tensor, QTensor] T = TypeVar("T", bound="Module") class _IncompatibleKeys(namedtuple("IncompatibleKeys", ["missing_keys", "unexpected_keys"])): def __repr__(self): if not self.missing_keys and not self.unexpected_keys: return "<All keys matched successfully>" return super().__repr__() __str__ = __repr__ # see: https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/module.py class Module: """ Pytorch like Module. Base class for all neural network modules. Your models should also subclass this class. """ _modules: Dict[str, Optional["Module"]] _buffers: Dict[str, Optional[TensorLike]] _non_persistent_buffers_set: Set[str] _quantizable_buffers: Set[str] _version: int = 1 def __init__(self, *args, **kwargs) -> None: """ Initializes internal Module state """ super().__setattr__("_modules", OrderedDict()) super().__setattr__("_buffers", OrderedDict()) super().__setattr__("_non_persistent_buffers_set", set()) super().__setattr__("_quantizable_buffers", set()) def __call__(self, *input): """ Call self as a function. """ return self.forward(*input) def forward(self, *input): """ Defines the computation performed at every call. Should be overridden by all subclasses. """ pass def children(self) -> Iterator["Module"]: r"""Returns an iterator over immediate children modules. Yields: Module: a child module """ for name, module in self.named_children(): yield module def named_children(self) -> Iterator[Tuple[str, "Module"]]: r"""Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself. Yields: (str, Module): Tuple containing a name and child module Example:: >>> for name, module in model.named_children(): >>> if name in ['conv4', 'conv5']: >>> print(module) """ memo = set() for name, module in self._modules.items(): if module is not None and module not in memo: memo.add(module) yield name, module def add_module(self, name: str, module: Optional["Module"]) -> None: r"""Adds a child module to the current module. The module can be accessed as an attribute using the given name. Args: name (str): name of the child module. The child module can be accessed from this module using the given name module (Module): child module to be added to the module. """ if not isinstance(module, Module) and module is not None: raise TypeError(f"{str(module)} is not a Module subclass") elif not isinstance(name, str): raise TypeError(f"module name should be a string. Got {name}") elif hasattr(self, name) and name not in self._modules: raise KeyError(f"attribute '{name}' already exists") elif "." in name: raise KeyError(f'module name can\'t contain ".", got: {name}') elif name == "": raise KeyError('module name can\'t be empty string ""') self._modules[name] = module def register_module(self, name: str, module: Optional["Module"]) -> None: r"""Alias for :func:`add_module`.""" self.add_module(name, module) def modules(self) -> Iterator["Module"]: r"""Returns an iterator over all modules in the network.""" for _, module in self.named_modules(): yield module def named_modules( self, memo: Optional[Set["Module"]] = None, prefix: str = "", remove_duplicate: bool = True, ): r"""Returns an iterator over all modules in the network, yielding both the name of the module as well as the module itself. 
Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not Yields: (str, Module): Tuple of name and module Note: Duplicate modules are returned only once. In the following example, ``l`` will be returned only once. """ if memo is None: memo = set() if self not in memo: if remove_duplicate: memo.add(self) yield prefix, self for name, module in self._modules.items(): if module is None: continue submodule_prefix = prefix + ("." if prefix else "") + name for m in module.named_modules(memo, submodule_prefix, remove_duplicate): yield m def buffers(self, recurse: bool = True) -> Iterator[TensorLike]: """ Returns an iterator over module buffers. """ for name, buf in self.named_buffers(recurse=recurse): yield buf def named_buffers( self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True ) -> Iterator[Tuple[str, TensorLike]]: r"""Returns an iterator over module buffers, yielding both the name of the buffer as well as the buffer itself. Args: prefix (str): prefix to prepend to all buffer names. recurse (bool, optional): if True, then yields buffers of this module and all submodules. Otherwise, yields only buffers that are direct members of this module. Defaults to True. remove_duplicate (bool, optional): whether to remove the duplicated buffers in the result. Defaults to True. Yields: (str, Tensor): Tuple containing the name and buffer Example:: >>> for name, buf in self.named_buffers(): >>> if name in ['running_var']: >>> print(buf.size()) """ gen = self._named_members( lambda module: module._buffers.items(), prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate, ) yield from gen # The user can pass an optional arbitrary mappable object to `state_dict`, in which case `state_dict` returns # back that same object. But if they pass nothing, an `OrderedDict` is created and returned. T_destination = TypeVar("T_destination", bound=Dict[str, Any]) @overload def state_dict(self, *, destination: T_destination, prefix: str = ..., keep_vars: bool = ...) -> T_destination: ... @overload def state_dict(self, *, prefix: str = ..., keep_vars: bool = ...) -> Dict[str, Any]: ... def state_dict(self, *args, destination=None, prefix="", keep_vars=False): r"""Returns a dictionary containing references to the whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to ``None`` are not included. .. note:: The returned object is a shallow copy. It contains references to the module's parameters and buffers. .. warning:: Currently ``state_dict()`` also accepts positional arguments for ``destination``, ``prefix`` and ``keep_vars`` in order. However, this is being deprecated and keyword arguments will be enforced in future releases. .. warning:: Please avoid the use of argument ``destination`` as it is not designed for end-users. Args: destination (dict, optional): If provided, the state of module will be updated into the dict and the same object is returned. Otherwise, an ``OrderedDict`` will be created and returned. Default: ``None``. prefix (str, optional): a prefix added to parameter and buffer names to compose the keys in state_dict. Default: ``''``. keep_vars (bool, optional): by default the :class:`~candle.Tensor` s returned in the state dict are detached from autograd. 
If it's set to ``True``, detaching will not be performed. Default: ``False``. Returns: dict: a dictionary containing a whole state of the module Example:: >>> # xdoctest: +SKIP("undefined vars") >>> module.state_dict().keys() ['bias', 'weight'] """ # TODO: Remove `args` and the parsing logic when BC allows. if len(args) > 0: if destination is None: destination = args[0] if len(args) > 1 and prefix == "": prefix = args[1] if len(args) > 2 and keep_vars is False: keep_vars = args[2] if destination is None: destination = OrderedDict() destination._metadata = OrderedDict() local_metadata = dict(version=self._version) if hasattr(destination, "_metadata"): destination._metadata[prefix[:-1]] = local_metadata self._save_to_state_dict(destination, prefix, keep_vars) for name, module in self._modules.items(): if module is not None: module.state_dict( destination=destination, prefix=prefix + name + ".", keep_vars=keep_vars, ) return destination def _save_to_state_dict(self, destination, prefix, keep_vars): r"""Saves module state to `destination` dictionary, containing a state of the module, but not its descendants. This is called on every submodule in :meth:`~candle.nn.Module.state_dict`. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module """ for name, buf in self._buffers.items(): if buf is not None and name not in self._non_persistent_buffers_set: if isinstance(buf, Tensor): destination[prefix + name] = buf if keep_vars else buf.detach() else: destination[prefix + name] = buf def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True, assign: bool = False): r"""Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~candle.nn.Module.state_dict` function. .. warning:: If :attr:`assign` is ``True`` the optimizer must be created after the call to :attr:`load_state_dict`. Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~candle.nn.Module.state_dict` function. Default: ``True`` assign (bool, optional): whether to assign items in the state dictionary to their corresponding keys in the module instead of copying them inplace into the module's current parameters and buffers. When ``False``, the properties of the tensors in the current module are preserved while when ``True``, the properties of the Tensors in the state dict are preserved. Default: ``False`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys Note: If a parameter or buffer is registered as ``None`` and its corresponding key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a ``RuntimeError``. 
""" if not isinstance(state_dict, Mapping): raise TypeError(f"Expected state_dict to be dict-like, got {type(state_dict)}.") missing_keys: List[str] = [] unexpected_keys: List[str] = [] error_msgs: List[str] = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = OrderedDict(state_dict) if metadata is not None: # mypy isn't aware that "_metadata" exists in state_dict state_dict._metadata = metadata # type: ignore[attr-defined] def load(module, local_state_dict, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) if assign: local_metadata["assign_to_params_buffers"] = assign module._load_from_state_dict( local_state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs, ) for name, child in module._modules.items(): if child is not None: child_prefix = prefix + name + "." child_state_dict = {k: v for k, v in local_state_dict.items() if k.startswith(child_prefix)} load(child, child_state_dict, child_prefix) load(self, state_dict) del load if strict: if len(unexpected_keys) > 0: error_msgs.insert( 0, "Unexpected key(s) in state_dict: {}. ".format(", ".join(f'"{k}"' for k in unexpected_keys)), ) if len(missing_keys) > 0: error_msgs.insert( 0, "Missing key(s) in state_dict: {}. ".format(", ".join(f'"{k}"' for k in missing_keys)), ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(self.__class__.__name__, "\n\t".join(error_msgs)) ) return _IncompatibleKeys(missing_keys, unexpected_keys) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, ): r"""Copies parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~candle.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. Additionally, :attr:`local_metadata` can also contain the key `assign_to_params_buffers` that indicates whether keys should be assigned their corresponding tensor in the state_dict. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~candle.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. 
See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~candle.nn.Module.load_state_dict` """ persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_name_params = persistent_buffers.items() local_state = {k: v for k, v in local_name_params if v is not None} for name, param in local_state.items(): key = prefix + name if key in state_dict: input_param = state_dict[key] if not isinstance(input_param, (Tensor, QTensor)): error_msgs.append( f'While copying the parameter named "{key}", ' "expected Tensor-like object from checkpoint but " f"received {type(input_param)}" ) continue if input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append( "size mismatch for {}: copying a param with shape {} from checkpoint, " "the shape in current model is {}.".format(key, input_param.shape, param.shape) ) continue try: # Shape checks are already done above -> Just assign tensor setattr(self, name, input_param) except Exception as ex: error_msgs.append( f'While copying the parameter named "{key}", ' f"whose dimensions in the model are {param.shape} and " f"whose dimensions in the checkpoint are {input_param.shape}, " f"an exception occurred : {ex.args}." ) elif strict: missing_keys.append(key) if strict: for key in state_dict.keys(): if key.startswith(prefix): input_name = key[len(prefix) :] input_name = input_name.split(".", 1)[0] # get the name of param/buffer/child if input_name not in self._modules and input_name not in local_state: unexpected_keys.append(key) def _named_members(self, get_members_fn, prefix="", recurse=True, remove_duplicate: bool = True): r"""Helper method for yielding various names + members of modules.""" memo = set() modules = self.named_modules(prefix=prefix, remove_duplicate=remove_duplicate) if recurse else [(prefix, self)] for module_prefix, module in modules: members = get_members_fn(module) for k, v in members: if v is None or v in memo: continue if remove_duplicate: memo.add(v) name = module_prefix + ("." if module_prefix else "") + k yield name, v def _get_name(self): return self.__class__.__name__ def _apply(self, fn): for module in self.children(): module._apply(fn) for key, buf in self._buffers.items(): if buf is not None: self._buffers[key] = fn(buf) return self def __move_tensor_to_device(self, tensor: TensorLike, device: str): if isinstance(tensor, Tensor): return tensor.to_device(device) else: raise NotImplementedError("Cannot offload QTensor to cuda, yet!") def device(self) -> str: """ Gets the device of the module, by inspecting its tensors. """ tensor = next(self.buffers()) if isinstance(tensor, Tensor): return tensor.device else: # QTensors can only be on the CPU return "cpu" def cuda(self: T) -> T: r"""Moves all model parameters and buffers to the GPU. This also makes associated parameters and buffers different objects. So it should be called before constructing optimizer if the module will live on GPU while being optimized. .. note:: This method modifies the module in-place. 
Returns: Module: self """ def to_cuda(t: TensorLike): return self.__move_tensor_to_device(t, "cuda") return self._apply(to_cuda) def cpu(self: T) -> T: r"""Moves all model parameters and buffers to the CPU. .. note:: This method modifies the module in-place. Returns: Module: self """ def to_cpu(t: TensorLike): return self.__move_tensor_to_device(t, "cpu") return self._apply(to_cpu) def __cast_tensor(self, tensor: TensorLike, dtype: Union[DType, str]): if isinstance(tensor, Tensor): return tensor.to_dtype(dtype) else: raise TypeError("candle.Module.to only accepts Tensor dtypes, but got desired dtype={}".format(dtype)) def type(self: T, dst_type: Union[DType, str]) -> T: r"""Casts all parameters and buffers to :attr:`dst_type`. .. note:: This method modifies the module in-place. Args: dst_type (type or string): the desired type Returns: Module: self """ def cast(t: TensorLike): return self.__cast_tensor(t, dst_type) return self._apply(cast) @overload def to( self: T, device: str = ..., dtype: Optional[Union[DType, str]] = ..., ) -> T: ... @overload def to(self: T, dtype: Union[DType, str]) -> T: ... def to(self, *args, **kwargs): r"""Moves and/or casts the parameters and buffers. This can be called as .. function:: to(device=None, dtype=None) :noindex: .. function:: to(dtype) :noindex: See below for examples. .. note:: This method modifies the module in-place. Args: device (:class:`candle.device`): the desired device of the parameters and buffers in this module dtype (:class:`candle.dtype`): the desired floating point dtype of the parameters and buffers in this module Returns: Module: self """ device = None dtype = None if args: for arg in args: # Assuming arg can be a string representing a device or a dtype if isinstance(arg, str): lower_arg = str(arg).lower() if lower_arg.startswith("cuda") or lower_arg == "cpu": device = lower_arg else: dtype = arg elif isinstance(arg, DType): dtype = str(arg) else: raise TypeError("Module.to() received an invalid combination of arguments. 
Got: {}".format(args)) if kwargs: device = kwargs.get("device", device) dtype = str(kwargs.get("dtype", dtype)) if device: device = device.lower() if dtype: dtype = dtype.lower() if dtype not in ["f32", "f16", "f64"]: raise TypeError( "candle.Module.to only accepts floating point" "dtypes, but got desired dtype={}".format(dtype) ) def convert(t): if dtype: t = self.__cast_tensor(t, dtype) if device: t = self.__move_tensor_to_device(t, device) return t return self._apply(convert) def __setattr__(self, __name: str, __value: Any) -> None: if isinstance(__value, Module): self._modules[__name] = __value elif isinstance(__value, QTensor): if __name in self._quantizable_buffers: type = __value.ggml_dtype.lower() if type in ["f32", "f16"]: # It is faster to just dequantize the tensor here and use the normal tensor operations dequant = __value.dequantize() if type == "f16": dequant = dequant.to_dtype("f16") self._buffers[__name] = dequant else: self._buffers[__name] = __value else: # We expect a normal tensor here => dequantize it self._buffers[__name] = __value.dequantize() elif isinstance(__value, Tensor): self._buffers[__name] = __value else: super().__setattr__(__name, __value) def __getattr__(self, __name: str) -> Any: if "_modules" in self.__dict__: modules = self.__dict__["_modules"] if __name in modules: return modules[__name] if "_buffers" in self.__dict__: tensors = self.__dict__["_buffers"] if __name in tensors: return tensors[__name] return super().__getattribute__(__name) def __delattr__(self, name): if name in self._buffers: del self._buffers[name] elif name in self._modules: del self._modules[name] else: super().__delattr__(name)
candle/candle-pyo3/py_src/candle/nn/module.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/module.py", "repo_id": "candle", "token_count": 12028 }
81
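The Python `Module` above keeps named buffers and serializes them through `state_dict`/`load_state_dict`; on the Rust side the corresponding bookkeeping goes through `VarMap`/`VarBuilder`, as already used in the batch-norm test earlier. The following is only a sketch of that Rust-side counterpart (not of the Python class itself), with a hypothetical `model.safetensors` output path:

```rust
use anyhow::Result;
use candle::{DType, Device};
use candle_nn::{batch_norm, BatchNormConfig, VarBuilder, VarMap};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    // Building the layer registers its tensors (weight, bias, running_mean, ...)
    // under their names, much like `_buffers` in the Python Module above.
    let _bn = batch_norm(1, BatchNormConfig::default(), vb)?;
    for name in varmap.data().lock().unwrap().keys() {
        println!("{name}");
    }
    // Persist the named tensors; `VarMap::load` restores them by name later.
    varmap.save("model.safetensors")?;
    Ok(())
}
```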
import candle print(f"mkl: {candle.utils.has_mkl()}") print(f"accelerate: {candle.utils.has_accelerate()}") print(f"num-threads: {candle.utils.get_num_threads()}") print(f"cuda: {candle.utils.cuda_is_available()}") t = candle.Tensor(42.0) print(t) print(t.shape, t.rank, t.device) print(t + t) t = candle.Tensor([3.0, 1, 4, 1, 5, 9, 2, 6]) print(t) print(t + t) t = t.reshape([2, 4]) print(t.matmul(t.t())) print(t.to_dtype(candle.u8)) print(t.to_dtype("u8")) t = candle.randn((5, 3)) print(t) print(t.dtype) t = candle.randn((16, 256)) quant_t = t.quantize("q6k") dequant_t = quant_t.dequantize() diff2 = (t - dequant_t).sqr() print(diff2.mean_all())
candle/candle-pyo3/test.py/0
{ "file_path": "candle/candle-pyo3/test.py", "repo_id": "candle", "token_count": 340 }
82
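test.py above round-trips a tensor through `q6k` quantization via the Python bindings. A sketch of the same round-trip against the Rust API follows; the exact `QTensor::quantize(&tensor, GgmlDType::Q6K)` entry point is an assumption about this crate version, so treat it as illustrative rather than authoritative:

```rust
use anyhow::Result;
use candle::quantized::{GgmlDType, QTensor};
use candle::{Device, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let t = Tensor::randn(0f32, 1f32, (16, 256), &dev)?;
    // Quantize to 6-bit k-quants, then dequantize straight back.
    let qt = QTensor::quantize(&t, GgmlDType::Q6K)?;
    let dq = qt.dequantize(&dev)?;
    // Mean squared round-trip error, mirroring `diff2.mean_all()` in test.py.
    let mse = (t - dq)?.sqr()?.mean_all()?;
    println!("q6k round-trip mse: {mse}");
    Ok(())
}
```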
use super::with_tracing::{linear, Embedding, Linear}; use candle::{Module, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, VarBuilder}; use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub encoder_hidden_size: usize, pub intermediate_size: usize, pub projection_dim: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub max_position_embeddings: usize, pub hidden_act: candle_nn::Activation, pub layer_norm_eps: f64, pub is_decoder: bool, } #[derive(Debug, Clone)] struct TextEmbeddings { word_embedddings: Embedding, position_embeddings: Embedding, layer_norm: LayerNorm, position_ids: Tensor, } impl TextEmbeddings { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let word_embedddings = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?; let position_embeddings = Embedding::new( cfg.max_position_embeddings, cfg.hidden_size, vb.pp("position_embeddings"), )?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; let position_ids = Tensor::arange(0, cfg.max_position_embeddings as u32, vb.device())?.unsqueeze(0)?; Ok(Self { word_embedddings, position_embeddings, layer_norm, position_ids, }) } fn forward(&self, xs: &Tensor, past_kv_len: usize) -> Result<Tensor> { let seq_len = xs.dim(1)?; let position_ids = self.position_ids.narrow(1, past_kv_len, seq_len)?; let embeddings = self.word_embedddings.forward(xs)?; let position_embeddings = self.position_embeddings.forward(&position_ids)?; (embeddings + position_embeddings)?.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextSelfAttention { query: Linear, key: Linear, value: Linear, attention_head_size: usize, num_attention_heads: usize, attention_scale: f64, kv_cache: Option<(Tensor, Tensor)>, } impl TextSelfAttention { fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> { let num_attention_heads = cfg.num_attention_heads; let attention_head_size = cfg.hidden_size / num_attention_heads; let all_head_size = cfg.num_attention_heads * attention_head_size; let query = linear(cfg.hidden_size, all_head_size, vb.pp("query"))?; let in_size = if is_cross_attention { cfg.encoder_hidden_size } else { cfg.hidden_size }; let key = linear(in_size, all_head_size, vb.pp("key"))?; let value = linear(in_size, all_head_size, vb.pp("value"))?; let attention_scale = 1f64 / (attention_head_size as f64).sqrt(); Ok(Self { query, key, value, attention_head_size, num_attention_heads, attention_scale, kv_cache: None, }) } fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, _) = xs.dims3()?; xs.reshape(( b_size, seq_len, self.num_attention_heads, self.attention_head_size, ))? .permute((0, 2, 1, 3)) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let query = self .transpose_for_scores(&self.query.forward(xs)?)? 
.contiguous()?; let (key, value) = match encoder_hidden_states { None => { let key = self.transpose_for_scores(&self.key.forward(xs)?)?; let value = self.transpose_for_scores(&self.value.forward(xs)?)?; let (key, value) = match &self.kv_cache { None => (key, value), Some((prev_key, prev_value)) => { let key = Tensor::cat(&[prev_key, &key], 2)?; let value = Tensor::cat(&[prev_value, &value], 2)?; (key, value) } }; self.kv_cache = Some((key.clone(), value.clone())); (key, value) } Some(xs) => { let key = self.transpose_for_scores(&self.key.forward(xs)?)?; let value = self.transpose_for_scores(&self.value.forward(xs)?)?; // no kv-cache in this case, but the results could probably be memoized. (key, value) } }; let key = key.contiguous()?; let value = value.contiguous()?; let attention_scores = query.matmul(&key.t()?)?; let attention_scores = (attention_scores * self.attention_scale)?; let attention_scores = match attention_mask { Some(mask) => attention_scores.broadcast_add(mask)?, None => attention_scores, }; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .flatten_from(D::Minus2) } } #[derive(Debug, Clone)] struct TextSelfOutput { dense: Linear, layer_norm: LayerNorm, } impl TextSelfOutput { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, layer_norm }) } fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { (xs.apply(&self.dense) + input_tensor)?.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextAttention { self_: TextSelfAttention, output: TextSelfOutput, } impl TextAttention { fn new(cfg: &Config, is_cross_attention: bool, vb: VarBuilder) -> Result<Self> { let self_ = TextSelfAttention::new(cfg, is_cross_attention, vb.pp("self"))?; let output = TextSelfOutput::new(cfg, vb.pp("output"))?; Ok(Self { self_, output }) } fn reset_kv_cache(&mut self) { self.self_.reset_kv_cache() } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: Option<&Tensor>, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let self_outputs = self .self_ .forward(xs, encoder_hidden_states, attention_mask)?; self.output.forward(&self_outputs, xs) } } #[derive(Debug, Clone)] struct TextIntermediate { dense: Linear, intermediate_act_fn: candle_nn::Activation, } impl TextIntermediate { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("dense"))?; Ok(Self { dense, intermediate_act_fn: cfg.hidden_act, }) } } impl Module for TextIntermediate { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense)?.apply(&self.intermediate_act_fn) } } #[derive(Debug, Clone)] struct TextOutput { dense: Linear, layer_norm: LayerNorm, } impl TextOutput { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, layer_norm }) } fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { (xs.apply(&self.dense)? 
+ input_tensor)?.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextLayer { attention: TextAttention, cross_attention: Option<TextAttention>, intermediate: TextIntermediate, output: TextOutput, } impl TextLayer { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let attention = TextAttention::new(cfg, false, vb.pp("attention"))?; let cross_attention = if cfg.is_decoder { Some(TextAttention::new(cfg, true, vb.pp("crossattention"))?) } else { None }; let intermediate = TextIntermediate::new(cfg, vb.pp("intermediate"))?; let output = TextOutput::new(cfg, vb.pp("output"))?; Ok(Self { attention, cross_attention, intermediate, output, }) } fn reset_kv_cache(&mut self) { self.attention.reset_kv_cache(); if let Some(ca) = &mut self.cross_attention { ca.reset_kv_cache() } } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { let attention_output = self.attention.forward(xs, None, Some(attention_mask))?; let attention_output = match &mut self.cross_attention { Some(ca) => ca.forward(&attention_output, Some(encoder_hidden_states), None)?, None => candle::bail!("expected some cross-attn"), }; let intermediate_output = self.intermediate.forward(&attention_output)?; self.output.forward(&intermediate_output, &attention_output) } } #[derive(Debug, Clone)] struct TextEncoder { layers: Vec<TextLayer>, } impl TextEncoder { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("layer"); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); for i in 0..cfg.num_hidden_layers { let layer = TextLayer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } fn reset_kv_cache(&mut self) { self.layers.iter_mut().for_each(|l| l.reset_kv_cache()) } fn forward( &mut self, xs: &Tensor, encoder_hidden_states: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter_mut() { xs = layer.forward(&xs, encoder_hidden_states, attention_mask)? } Ok(xs) } } #[derive(Debug, Clone)] pub struct TextPooler { dense: Linear, } impl TextPooler { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; Ok(Self { dense }) } } impl Module for TextPooler { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.narrow(D::Minus1, 0, 1)? .squeeze(D::Minus1)? .apply(&self.dense)? .tanh() } } #[derive(Debug, Clone)] struct TextPredictionHeadTransform { dense: Linear, transform_act_fn: candle_nn::Activation, layer_norm: LayerNorm, } impl TextPredictionHeadTransform { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?; Ok(Self { dense, transform_act_fn: cfg.hidden_act, layer_norm, }) } } impl Module for TextPredictionHeadTransform { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.dense)? .apply(&self.transform_act_fn)? 
.apply(&self.layer_norm) } } #[derive(Debug, Clone)] struct TextLMPredictionHead { transform: TextPredictionHeadTransform, decoder: Linear, } impl TextLMPredictionHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let transform = TextPredictionHeadTransform::new(cfg, vb.pp("transform"))?; let weight = vb.get((cfg.vocab_size, cfg.hidden_size), "decoder.weight")?; let bias = vb.get(cfg.vocab_size, "bias")?; let decoder = Linear::from_weights(weight, Some(bias)); Ok(Self { transform, decoder }) } } impl Module for TextLMPredictionHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.transform)?.apply(&self.decoder) } } #[derive(Debug, Clone)] struct TextOnlyMLMHead { predictions: TextLMPredictionHead, } impl TextOnlyMLMHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let predictions = TextLMPredictionHead::new(cfg, vb.pp("predictions"))?; Ok(Self { predictions }) } } impl Module for TextOnlyMLMHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.predictions.forward(xs) } } #[derive(Debug, Clone)] struct TextModel { embeddings: TextEmbeddings, encoder: TextEncoder, past_kv_len: usize, // We do not need the pooler for caption generation } impl TextModel { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embeddings = TextEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = TextEncoder::new(cfg, vb.pp("encoder"))?; Ok(Self { embeddings, encoder, past_kv_len: 0, }) } fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: &Tensor, attention_mask: &Tensor, ) -> Result<Tensor> { let (_b_sz, seq_len) = input_ids.dims2()?; let embedding_output = self.embeddings.forward(input_ids, self.past_kv_len)?; let sequence_output = self.encoder .forward(&embedding_output, encoder_hidden_states, attention_mask)?; self.past_kv_len += seq_len; // We're interested in the sequence-output rather than the pooled-output. Ok(sequence_output) } fn reset_kv_cache(&mut self) { self.past_kv_len = 0; self.encoder.reset_kv_cache(); } } #[derive(Debug, Clone)] pub struct TextLMHeadModel { bert: TextModel, cls: TextOnlyMLMHead, } impl TextLMHeadModel { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let bert = TextModel::new(cfg, vb.pp("bert"))?; let cls = TextOnlyMLMHead::new(cfg, vb.pp("cls"))?; Ok(Self { bert, cls }) } pub fn forward( &mut self, input_ids: &Tensor, encoder_hidden_states: &Tensor, ) -> Result<Tensor> { let seq_len = input_ids.dim(1)?; let mask: Vec<_> = (0..seq_len) .flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (seq_len, seq_len), input_ids.device())?; let sequence_output = self.bert.forward(input_ids, encoder_hidden_states, &mask)?; let prediction_scores = self.cls.forward(&sequence_output)?; // return_logits is false so we don't discard the last sequence element. Ok(prediction_scores) } pub fn reset_kv_cache(&mut self) { self.bert.reset_kv_cache() } }
candle/candle-transformers/src/models/blip_text.rs/0
{ "file_path": "candle/candle-transformers/src/models/blip_text.rs", "repo_id": "candle", "token_count": 7148 }
83
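`TextLMHeadModel::forward` above builds its causal attention mask with a `flat_map` over (row, column) index pairs. The same pattern in isolation, for a sequence length of 4, makes the structure easier to see (a standalone sketch, not tied to any checkpoint):

```rust
use anyhow::Result;
use candle::{Device, Tensor};

fn main() -> Result<()> {
    let seq_len = 4usize;
    // 0 where attention is allowed (j <= i), -inf where position j lies in the future.
    let mask: Vec<f32> = (0..seq_len)
        .flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 }))
        .collect();
    // Broadcast-added to the attention scores before softmax, the -inf entries
    // drive the corresponding probabilities to zero.
    let mask = Tensor::from_vec(mask, (seq_len, seq_len), &Device::Cpu)?;
    println!("{mask}");
    Ok(())
}
```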
use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::linear_no_bias as linear; use candle_nn::{embedding, rms_norm, Embedding, Linear, Module, RmsNorm, VarBuilder}; use std::collections::HashMap; #[derive(Debug, Clone)] pub struct Config { pub dim: usize, // transformer dimension pub hidden_dim: usize, // for ffn layers pub n_layers: usize, // number of layers pub n_heads: usize, // number of query heads pub n_kv_heads: usize, // number of key/value heads (can be < query heads because of multiquery) pub vocab_size: usize, // vocabulary size, usually 256 (byte-level) pub seq_len: usize, // max sequence length pub norm_eps: f64, } impl Config { pub fn tiny_260k() -> Self { Self { dim: 64, hidden_dim: 768, n_layers: 5, n_heads: 8, n_kv_heads: 4, vocab_size: 32000, seq_len: 512, norm_eps: 1e-5, } } pub fn tiny_15m() -> Self { Self { dim: 288, hidden_dim: 768, n_layers: 6, n_heads: 6, n_kv_heads: 6, vocab_size: 32000, seq_len: 256, norm_eps: 1e-5, } } pub fn tiny_42m() -> Self { Self { dim: 512, hidden_dim: 768, n_layers: 8, n_heads: 8, n_kv_heads: 8, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } pub fn tiny_110m() -> Self { Self { dim: 768, hidden_dim: 768, n_layers: 12, n_heads: 12, n_kv_heads: 12, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } } #[derive(Debug, Clone)] pub struct Cache { masks: HashMap<usize, Tensor>, pub use_kv_cache: bool, pub kvs: Vec<Option<(Tensor, Tensor)>>, pub cos: Tensor, pub sin: Tensor, device: Device, } impl Cache { pub fn new(use_kv_cache: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_elem = cfg.dim / cfg.n_heads; let theta: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / n_elem as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), vb.device())?; let idx_theta = Tensor::arange(0, cfg.seq_len as u32, vb.device())? .to_dtype(DType::F32)? .reshape((cfg.seq_len, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let precomputed_cos = idx_theta.cos()?; let precomputed_sin = idx_theta.sin()?; let freq_cis_real = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_real") .unwrap_or(precomputed_cos); let freq_cis_imag = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_imag") .unwrap_or(precomputed_sin); let cos = freq_cis_real.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; let sin = freq_cis_imag.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; Ok(Self { masks: HashMap::new(), use_kv_cache, kvs: vec![None; cfg.n_layers], cos, sin, device: vb.device().clone(), }) } pub fn mask(&mut self, t: usize) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), &self.device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } } fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? 
} #[derive(Debug, Clone)] struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_head: usize, n_key_value_head: usize, head_dim: usize, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> { let (b_sz, seq_len, h, n_embd) = x.dims4()?; let cos = cache.cos.i(index_pos..index_pos + seq_len)?; let sin = cache.sin.i(index_pos..index_pos + seq_len)?; let cos = cos.unsqueeze(1)?; let sin = sin.unsqueeze(1)?; let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?; Ok(rope) } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let (b_sz, seq_len, n_embd) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?; let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let q = self.apply_rotary_emb(&q, index_pos, cache)?; let mut k = self.apply_rotary_emb(&k, index_pos, cache)?; if cache.use_kv_cache { if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] { k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?; } cache.kvs[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?.contiguous()?; let k = k.transpose(1, 2)?.contiguous()?; let v = v.transpose(1, 2)?.contiguous()?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = if seq_len <= 1 { att } else { let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?; masked_fill(&att, &mask, f32::NEG_INFINITY)? }; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_head / self.n_key_value_head; if n_rep == 1 { Ok(x) } else { let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?; let x = x .unsqueeze(3)? .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))? 
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let size_in = cfg.dim; let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads; let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_head: cfg.n_heads, n_key_value_head: cfg.n_kv_heads, head_dim: cfg.dim / cfg.n_heads, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, } impl Mlp { fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self { Self { c_fc1, c_fc2, c_proj, } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h_size = cfg.dim; let i_size = cfg.hidden_dim; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self::new(c_fc1, c_fc2, c_proj)) } } #[derive(Debug, Clone)] struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let input_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } #[derive(Debug, Clone)] pub struct Llama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, pub config: Config, } impl Llama { pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> { let (_b_sz, _seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx, cache)?; } let x = self.ln_f.forward(&x)?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let wte = embedding(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.n_layers) .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), &cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, config: cfg, }) } }
candle/candle-transformers/src/models/llama2_c.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama2_c.rs", "repo_id": "candle", "token_count": 6478 }
84
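To show how the pieces of `llama2_c.rs` fit together, here is a minimal end-to-end call sequence. It uses `VarBuilder::zeros` so the snippet stays self-contained; the weights are therefore meaningless, and real inference would instead load a converted llama2.c checkpoint into the `VarBuilder` (that loading code lives elsewhere and is not shown):

```rust
use anyhow::Result;
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::llama2_c::{Cache, Config, Llama};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let cfg = Config::tiny_15m();
    // Zero-initialized builders are enough to exercise shapes and the kv-cache path.
    let cache_vb = VarBuilder::zeros(DType::F32, &dev);
    let model_vb = VarBuilder::zeros(DType::F32, &dev);
    let mut cache = Cache::new(true, &cfg, cache_vb)?;
    let model = Llama::load(model_vb, cfg)?;
    // A single prompt token; logits come back as (batch, seq_len, vocab_size).
    let input = Tensor::new(&[1u32], &dev)?.unsqueeze(0)?;
    let logits = model.forward(&input, 0, &mut cache)?;
    println!("logits dims: {:?}", logits.dims());
    Ok(())
}
```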
use super::quantized_blip_text as blip_text; use crate::quantized_nn::{layer_norm, linear, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{Module, Result, Tensor, D}; use candle_nn::{Conv2d, Conv2dConfig, LayerNorm}; pub type VisionConfig = super::blip::VisionConfig; pub type Config = super::blip::Config; #[derive(Debug, Clone)] struct VisionEmbeddings { class_embedding: Tensor, patch_embedding: Conv2d, position_embedding: Tensor, } impl VisionEmbeddings { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let class_embedding = vb .get((1, 1, cfg.hidden_size), "class_embedding")? .dequantize(vb.device())?; let conv_cfg = Conv2dConfig { stride: cfg.patch_size, ..Default::default() }; let pe_vb = vb.pp("patch_embedding"); let pe_weight = pe_vb .get( (cfg.hidden_size, 3, cfg.patch_size, cfg.patch_size), "weight", )? .dequantize(vb.device())?; let pe_bias = pe_vb .get(cfg.hidden_size, "bias")? .dequantize(vb.device())?; let patch_embedding = Conv2d::new(pe_weight, Some(pe_bias), conv_cfg); let num_patches1 = cfg.image_size / cfg.patch_size; let num_patches = num_patches1 * num_patches1; let num_positions = num_patches + 1; let position_embedding = vb .get((1, num_positions, cfg.hidden_size), "position_embedding")? .dequantize(vb.device())?; Ok(Self { class_embedding, patch_embedding, position_embedding, }) } } impl Module for VisionEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let target_dtype = xs.dtype(); let b_size = xs.dim(0)?; let patch_embeds = xs.apply(&self.patch_embedding)?.flatten_from(2)?.t()?; let d = self.class_embedding.dim(D::Minus1)?; let class_embeds = self .class_embedding .broadcast_as((b_size, 1, d))? .to_dtype(target_dtype)?; let embeddings = Tensor::cat(&[&class_embeds, &patch_embeds], 1)?; let position_embedding = self.position_embedding.narrow(1, 0, embeddings.dim(1)?)?; embeddings.broadcast_add(&position_embedding) } } #[derive(Debug, Clone)] struct Attention { qkv: Linear, projection: Linear, scale: f64, num_heads: usize, } impl Attention { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = embed_dim / num_heads; let scale = 1f64 / (head_dim as f64).sqrt(); let qkv = linear(embed_dim, 3 * embed_dim, vb.pp("qkv"))?; let projection = linear(embed_dim, embed_dim, vb.pp("projection"))?; Ok(Self { qkv, projection, scale, num_heads, }) } fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> { let (b_sz, tgt_len, embed_dim) = xs.dims3()?; let mixed_qkv = xs .apply(&self.qkv)? .reshape((b_sz, tgt_len, 3, self.num_heads, embed_dim / self.num_heads))? .permute((2, 0, 3, 1, 4))?; let query = mixed_qkv.get(0)?; let key = mixed_qkv.get(1)?; let value = mixed_qkv.get(2)?; let attention_scores = query.matmul(&key.t()?)?; let attention_scores = (attention_scores * self.scale)?; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let attention_probs = match attn_mask { None => attention_probs, Some(attn_mask) => (attention_probs * attn_mask)?, }; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .flatten_from(D::Minus2)? 
.apply(&self.projection) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { activation_fn: candle_nn::Activation, fc1: Linear, fc2: Linear, } impl MLP { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?; let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?; Ok(Self { activation_fn: cfg.hidden_act, fc1, fc2, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.fc1)? .apply(&self.activation_fn)? .apply(&self.fc2) } } #[derive(Debug, Clone)] struct EncoderLayer { self_attn: Attention, layer_norm1: LayerNorm, mlp: MLP, layer_norm2: LayerNorm, } impl EncoderLayer { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let self_attn = Attention::new(cfg, vb.pp("self_attn"))?; let layer_norm1 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm1"))?; let layer_norm2 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm2"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.layer_norm1)?; let xs = self.self_attn.forward(&xs, attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?; xs + residual } } #[derive(Debug, Clone)] struct Encoder { layers: Vec<EncoderLayer>, } impl Encoder { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb = vb.pp("layers"); for i in 0..cfg.num_hidden_layers { let layer = EncoderLayer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, attention_mask)? } Ok(xs) } } #[derive(Debug, Clone)] pub struct VisionModel { embeddings: VisionEmbeddings, encoder: Encoder, post_layernorm: LayerNorm, } impl VisionModel { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let post_layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, post_layernorm, }) } } impl Module for VisionModel { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.embeddings)?; let encoder_outputs = self.encoder.forward(&xs, None)?; // Return the last hidden state rather than pooled outputs. encoder_outputs.apply(&self.post_layernorm) } } #[derive(Debug, Clone)] pub struct BlipForConditionalGeneration { vision_model: VisionModel, text_decoder: blip_text::TextLMHeadModel, } impl BlipForConditionalGeneration { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vision_model = VisionModel::new(&cfg.vision_config, vb.pp("vision_model"))?; let text_decoder = blip_text::TextLMHeadModel::new(&cfg.text_config, vb.pp("text_decoder"))?; Ok(Self { vision_model, text_decoder, }) } pub fn vision_model(&self) -> &VisionModel { &self.vision_model } pub fn text_decoder(&mut self) -> &mut blip_text::TextLMHeadModel { &mut self.text_decoder } pub fn reset_kv_cache(&mut self) { self.text_decoder.reset_kv_cache(); } }
candle/candle-transformers/src/models/quantized_blip.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_blip.rs", "repo_id": "candle", "token_count": 4013 }
85
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub max_position_embeddings: usize, pub sliding_window: usize, pub max_window_layers: usize, pub tie_word_embeddings: bool, pub rope_theta: f64, pub rms_norm_eps: f64, pub use_sliding_window: bool, pub hidden_act: Activation, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, 
num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? 
.apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } }
candle/candle-transformers/src/models/qwen2.rs/0
{ "file_path": "candle/candle-transformers/src/models/qwen2.rs", "repo_id": "candle", "token_count": 5856 }
86
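The Qwen2 attention block above combines grouped-query attention (each of the `num_kv_heads` key/value heads is shared by `num_kv_groups` query heads, which is what `repeat_kv` expands at the tensor level) with a sliding-window causal mask built in `Model::prepare_decoder_attention_mask`. The sketch below reproduces only that masking rule using the standard library, so it runs without candle; the function name and the printed layout are illustrative additions, not part of the candle API.

```rust
// Standalone illustration of the masking rule in
// `Model::prepare_decoder_attention_mask` above, with candle tensors replaced
// by a plain `Vec<f32>` so the snippet runs on the standard library alone.
fn sliding_window_causal_mask(tgt_len: usize, sliding_window: usize) -> Vec<f32> {
    (0..tgt_len)
        .flat_map(|i| {
            (0..tgt_len).map(move |j| {
                // Query position `i` may attend to key position `j` only if
                // `j <= i` (causality) and `i - j <= sliding_window` (window).
                if i < j || j + sliding_window < i {
                    f32::NEG_INFINITY
                } else {
                    0.
                }
            })
        })
        .collect()
}

fn main() {
    let (tgt_len, sliding_window) = (6, 3);
    let mask = sliding_window_causal_mask(tgt_len, sliding_window);
    for i in 0..tgt_len {
        // Print "." for attendable positions and "x" for masked ones.
        let row: Vec<&str> = (0..tgt_len)
            .map(|j| if mask[i * tgt_len + j] == 0. { "." } else { "x" })
            .collect();
        println!("{}", row.join(" "));
    }
}
```

With `tgt_len = 6` and `sliding_window = 3` this prints a banded lower-triangular pattern: each query position attends to itself and to at most the three previous positions, and the `f32::NEG_INFINITY` entries are exactly the ones that the softmax in `Attention::forward` turns into zero attention weights.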
//! Contrastive Language-Image Pre-Training //! //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! https://github.com/openai/CLIP use candle::{DType, Device, Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone, Copy)] pub enum Activation { QuickGelu, Gelu, GeluErf, } impl Module for Activation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?, Activation::Gelu => xs.gelu(), Activation::GeluErf => xs.gelu_erf(), } } } #[derive(Debug, Clone)] pub struct Config { vocab_size: usize, embed_dim: usize, // aka config.hidden_size activation: Activation, // aka config.hidden_act intermediate_size: usize, pub max_position_embeddings: usize, // The character to use for padding, use EOS when not set. pub pad_with: Option<String>, num_hidden_layers: usize, num_attention_heads: usize, #[allow(dead_code)] projection_dim: usize, } impl Config { // The config details can be found in the "text_config" section of this json file: // https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json pub fn v1_5() -> Self { Self { vocab_size: 49408, embed_dim: 768, intermediate_size: 3072, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 768, activation: Activation::QuickGelu, } } // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/text_encoder/config.json pub fn v2_1() -> Self { Self { vocab_size: 49408, embed_dim: 1024, intermediate_size: 4096, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 23, num_attention_heads: 16, projection_dim: 512, activation: Activation::Gelu, } } // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder/config.json pub fn sdxl() -> Self { Self { vocab_size: 49408, embed_dim: 768, intermediate_size: 3072, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 768, activation: Activation::QuickGelu, } } // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder_2/config.json pub fn sdxl2() -> Self { Self { vocab_size: 49408, embed_dim: 1280, intermediate_size: 5120, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 32, num_attention_heads: 20, projection_dim: 1280, activation: Activation::Gelu, } } pub fn ssd1b() -> Self { Self::sdxl() } pub fn ssd1b2() -> Self { Self::sdxl2() } // https://huggingface.co/warp-ai/wuerstchen/blob/main/text_encoder/config.json pub fn wuerstchen() -> Self { Self { vocab_size: 49408, embed_dim: 1024, intermediate_size: 4096, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 24, num_attention_heads: 16, projection_dim: 1024, activation: Activation::GeluErf, } } // https://huggingface.co/warp-ai/wuerstchen-prior/blob/main/text_encoder/config.json pub fn wuerstchen_prior() -> Self { Self { vocab_size: 49408, embed_dim: 1280, intermediate_size: 5120, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 32, num_attention_heads: 20, projection_dim: 512, activation: Activation::GeluErf, } } } // CLIP Text Model // https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py #[derive(Debug)] struct ClipTextEmbeddings { token_embedding: candle_nn::Embedding, position_embedding: 
candle_nn::Embedding, position_ids: Tensor, } impl ClipTextEmbeddings { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let token_embedding = candle_nn::embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?; let position_embedding = candle_nn::embedding( c.max_position_embeddings, c.embed_dim, vs.pp("position_embedding"), )?; let position_ids = Tensor::arange(0u32, c.max_position_embeddings as u32, vs.device())?.unsqueeze(0)?; Ok(ClipTextEmbeddings { token_embedding, position_embedding, position_ids, }) } } impl Module for ClipTextEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let token_embedding = self.token_embedding.forward(xs)?; let position_embedding = self.position_embedding.forward(&self.position_ids)?; token_embedding.broadcast_add(&position_embedding) } } #[derive(Debug)] struct ClipAttention { k_proj: candle_nn::Linear, v_proj: candle_nn::Linear, q_proj: candle_nn::Linear, out_proj: candle_nn::Linear, head_dim: usize, scale: f64, num_attention_heads: usize, } impl ClipAttention { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let embed_dim = c.embed_dim; let num_attention_heads = c.num_attention_heads; let k_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("k_proj"))?; let v_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("v_proj"))?; let q_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("q_proj"))?; let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?; let head_dim = embed_dim / num_attention_heads; let scale = (head_dim as f64).powf(-0.5); Ok(ClipAttention { k_proj, v_proj, q_proj, out_proj, head_dim, scale, num_attention_heads, }) } fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> { xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)? .contiguous() } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let in_dtype = xs.dtype(); let (bsz, seq_len, embed_dim) = xs.dims3()?; let query_states = (self.q_proj.forward(xs)? * self.scale)?; let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim); let query_states = self .shape(&query_states, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let key_states = self .shape(&self.k_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let value_states = self .shape(&self.v_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let src_len = key_states.dim(1)?; let attn_weights = attn_weights .reshape((bsz, self.num_attention_heads, seq_len, src_len))? .broadcast_add(causal_attention_mask)?; let attn_weights = attn_weights.reshape((bsz * self.num_attention_heads, seq_len, src_len))?; let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?; let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?; let attn_output = attn_output .reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))? .transpose(1, 2)? 
.reshape((bsz, seq_len, embed_dim))?; self.out_proj.forward(&attn_output) } } #[derive(Debug)] struct ClipMlp { fc1: candle_nn::Linear, fc2: candle_nn::Linear, activation: Activation, } impl ClipMlp { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let fc1 = candle_nn::linear(c.embed_dim, c.intermediate_size, vs.pp("fc1"))?; let fc2 = candle_nn::linear(c.intermediate_size, c.embed_dim, vs.pp("fc2"))?; Ok(ClipMlp { fc1, fc2, activation: c.activation, }) } } impl ClipMlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?; self.fc2.forward(&self.activation.forward(&xs)?) } } #[derive(Debug)] struct ClipEncoderLayer { self_attn: ClipAttention, layer_norm1: candle_nn::LayerNorm, mlp: ClipMlp, layer_norm2: candle_nn::LayerNorm, } impl ClipEncoderLayer { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let self_attn = ClipAttention::new(vs.pp("self_attn"), c)?; let layer_norm1 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm1"))?; let mlp = ClipMlp::new(vs.pp("mlp"), c)?; let layer_norm2 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm2"))?; Ok(ClipEncoderLayer { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self.layer_norm1.forward(xs)?; let xs = self.self_attn.forward(&xs, causal_attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = self.layer_norm2.forward(&xs)?; let xs = self.mlp.forward(&xs)?; xs + residual } } #[derive(Debug)] struct ClipEncoder { layers: Vec<ClipEncoderLayer>, } impl ClipEncoder { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let vs = vs.pp("layers"); let mut layers: Vec<ClipEncoderLayer> = Vec::new(); for index in 0..c.num_hidden_layers { let layer = ClipEncoderLayer::new(vs.pp(&index.to_string()), c)?; layers.push(layer) } Ok(ClipEncoder { layers }) } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, causal_attention_mask)?; } Ok(xs) } } /// A CLIP transformer based model. #[derive(Debug)] pub struct ClipTextTransformer { embeddings: ClipTextEmbeddings, encoder: ClipEncoder, final_layer_norm: candle_nn::LayerNorm, } impl ClipTextTransformer { pub fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let vs = vs.pp("text_model"); let embeddings = ClipTextEmbeddings::new(vs.pp("embeddings"), c)?; let encoder = ClipEncoder::new(vs.pp("encoder"), c)?; let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("final_layer_norm"))?; Ok(ClipTextTransformer { embeddings, encoder, final_layer_norm, }) } // https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py#L678 fn build_causal_attention_mask( bsz: usize, seq_len: usize, mask_after: usize, device: &Device, ) -> Result<Tensor> { let mask: Vec<_> = (0..seq_len) .flat_map(|i| { (0..seq_len).map(move |j| { if j > i || j > mask_after { f32::MIN } else { 0. 
} }) }) .collect(); let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?; mask.broadcast_as((bsz, seq_len, seq_len)) } pub fn forward_with_mask(&self, xs: &Tensor, mask_after: usize) -> Result<Tensor> { let (bsz, seq_len) = xs.dims2()?; let xs = self.embeddings.forward(xs)?; let causal_attention_mask = Self::build_causal_attention_mask(bsz, seq_len, mask_after, xs.device())?; let xs = self.encoder.forward(&xs, &causal_attention_mask)?; self.final_layer_norm.forward(&xs) } } impl Module for ClipTextTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.forward_with_mask(xs, usize::MAX) } }
candle/candle-transformers/src/models/stable_diffusion/clip.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/clip.rs", "repo_id": "candle", "token_count": 6474 }
87
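A low-cost way to exercise the `ClipTextTransformer` above is to build it from an all-zero `VarBuilder`: the output is numerically meaningless, but it confirms the wiring, the causal mask, and the expected shapes without downloading any weights. This is a sketch under assumptions: the `use` paths and the availability of `VarBuilder::zeros` come from the surrounding candle crates rather than from this file, so adjust them if the actual re-exports differ.

```rust
use candle_core::{DType, Device, Result, Tensor};
use candle_nn::{Module, VarBuilder};
// Assumed re-export path for the module shown above; adjust if it differs.
use candle_transformers::models::stable_diffusion::clip::{ClipTextTransformer, Config};

fn main() -> Result<()> {
    let device = Device::Cpu;
    // All-zero weights: enough to check shapes and mask plumbing,
    // not to produce useful text embeddings.
    let vb = VarBuilder::zeros(DType::F32, &device);
    let cfg = Config::v1_5();
    let model = ClipTextTransformer::new(vb, &cfg)?;
    // One dummy prompt, already "tokenized" and padded to the full context length.
    let tokens = Tensor::zeros((1, cfg.max_position_embeddings), DType::U32, &device)?;
    let hidden = model.forward(&tokens)?;
    println!("{:?}", hidden.dims()); // [1, 77, 768] for the v1.5 config
    Ok(())
}
```

With real text-encoder weights, the same call sequence returns the per-token hidden states that the stable-diffusion pipelines use as conditioning for the denoising network.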
//! VGG-16 model implementation. //! //! See Very Deep Convolutional Networks for Large-Scale Image Recognition //! <https://arxiv.org/abs/1409.1556> use candle::{ModuleT, Result, Tensor}; use candle_nn::{FuncT, VarBuilder}; // Enum representing the different VGG models pub enum Models { Vgg13, Vgg16, Vgg19, } // Struct representing a VGG model #[derive(Debug)] pub struct Vgg<'a> { blocks: Vec<FuncT<'a>>, } // Struct representing the configuration for the pre-logit layer struct PreLogitConfig { in_dim: (usize, usize, usize, usize), target_in: usize, target_out: usize, } // Implementation of the VGG model impl<'a> Vgg<'a> { // Function to create a new VGG model pub fn new(vb: VarBuilder<'a>, model: Models) -> Result<Self> { let blocks = match model { Models::Vgg13 => vgg13_blocks(vb)?, Models::Vgg16 => vgg16_blocks(vb)?, Models::Vgg19 => vgg19_blocks(vb)?, }; Ok(Self { blocks }) } } // Implementation of the forward pass for the VGG model impl ModuleT for Vgg<'_> { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> { let mut xs = xs.unsqueeze(0)?; for block in self.blocks.iter() { xs = xs.apply_t(block, train)?; } Ok(xs) } } // Function to create a conv2d block // The block is composed of two conv2d layers followed by a max pool layer fn conv2d_block(convs: &[(usize, usize, &str)], vb: &VarBuilder) -> Result<FuncT<'static>> { let layers = convs .iter() .enumerate() .map(|(_, &(in_c, out_c, name))| { candle_nn::conv2d( in_c, out_c, 3, candle_nn::Conv2dConfig { stride: 1, padding: 1, ..Default::default() }, vb.pp(name), ) }) .collect::<Result<Vec<_>>>()?; Ok(FuncT::new(move |xs, _train| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)?.relu()? } xs = xs.max_pool2d_with_stride(2, 2)?; Ok(xs) })) } // Function to create a fully connected layer // The layer is composed of two linear layers followed by a dropout layer fn fully_connected( num_classes: usize, pre_logit_1: PreLogitConfig, pre_logit_2: PreLogitConfig, vb: VarBuilder, ) -> Result<FuncT> { let lin = get_weights_and_biases( &vb.pp("pre_logits.fc1"), pre_logit_1.in_dim, pre_logit_1.target_in, pre_logit_1.target_out, )?; let lin2 = get_weights_and_biases( &vb.pp("pre_logits.fc2"), pre_logit_2.in_dim, pre_logit_2.target_in, pre_logit_2.target_out, )?; let dropout1 = candle_nn::Dropout::new(0.5); let dropout2 = candle_nn::Dropout::new(0.5); let dropout3 = candle_nn::Dropout::new(0.5); Ok(FuncT::new(move |xs, train| { let xs = xs.reshape((1, pre_logit_1.target_out))?; let xs = xs.apply_t(&dropout1, train)?.apply(&lin)?.relu()?; let xs = xs.apply_t(&dropout2, train)?.apply(&lin2)?.relu()?; let lin3 = candle_nn::linear(4096, num_classes, vb.pp("head.fc"))?; let xs = xs.apply_t(&dropout3, train)?.apply(&lin3)?.relu()?; Ok(xs) })) } // Function to get the weights and biases for a layer // This is required because the weights and biases are stored in different format than our linear layer expects fn get_weights_and_biases( vs: &VarBuilder, in_dim: (usize, usize, usize, usize), target_in: usize, target_out: usize, ) -> Result<candle_nn::Linear> { let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL; let ws = vs.get_with_hints(in_dim, "weight", init_ws)?; let ws = ws.reshape((target_in, target_out))?; let bound = 1. 
/ (target_out as f64).sqrt(); let init_bs = candle_nn::Init::Uniform { lo: -bound, up: bound, }; let bs = vs.get_with_hints(target_in, "bias", init_bs)?; Ok(candle_nn::Linear::new(ws, Some(bs))) } fn vgg13_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block(&[(128, 256, "features.10"), (256, 256, "features.12")], &vb)?, conv2d_block(&[(256, 512, "features.15"), (512, 512, "features.17")], &vb)?, conv2d_block(&[(512, 512, "features.20"), (512, 512, "features.22")], &vb)?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) } fn vgg16_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block( &[ (128, 256, "features.10"), (256, 256, "features.12"), (256, 256, "features.14"), ], &vb, )?, conv2d_block( &[ (256, 512, "features.17"), (512, 512, "features.19"), (512, 512, "features.21"), ], &vb, )?, conv2d_block( &[ (512, 512, "features.24"), (512, 512, "features.26"), (512, 512, "features.28"), ], &vb, )?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) } fn vgg19_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block( &[ (128, 256, "features.10"), (256, 256, "features.12"), (256, 256, "features.14"), (256, 256, "features.16"), ], &vb, )?, conv2d_block( &[ (256, 512, "features.19"), (512, 512, "features.21"), (512, 512, "features.23"), (512, 512, "features.25"), ], &vb, )?, conv2d_block( &[ (512, 512, "features.28"), (512, 512, "features.30"), (512, 512, "features.32"), (512, 512, "features.34"), ], &vb, )?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) }
candle/candle-transformers/src/models/vgg.rs/0
{ "file_path": "candle/candle-transformers/src/models/vgg.rs", "repo_id": "candle", "token_count": 4303 }
88
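The VGG implementation above can be smoke-tested the same way with zero-initialised weights. `forward_t` unsqueezes its input itself, so the sketch passes a single CHW image at the 224x224 resolution the classifier head assumes; the crate paths and `VarBuilder::zeros` are again assumptions rather than something shown in this file.

```rust
use candle_core::{DType, Device, ModuleT, Result, Tensor};
use candle_nn::VarBuilder;
// Assumed re-export path for the module shown above; adjust if it differs.
use candle_transformers::models::vgg::{Models, Vgg};

fn main() -> Result<()> {
    let device = Device::Cpu;
    // Zero-initialised weights: fine for checking shapes, useless for classification.
    let vb = VarBuilder::zeros(DType::F32, &device);
    let model = Vgg::new(vb, Models::Vgg16)?;
    // A single 3x224x224 image; `forward_t` adds the batch dimension itself.
    let image = Tensor::zeros((3, 224, 224), DType::F32, &device)?;
    // `train = false` makes the dropout layers in the classifier head identity ops.
    let logits = model.forward_t(&image, false)?;
    println!("{:?}", logits.dims()); // [1, 1000]
    Ok(())
}
```

Passing `train = true` instead enables the three dropout layers in the classifier head; with the zero weights used here it makes no visible difference, but with pretrained weights loaded through a real `VarBuilder` the output row would be the 1000 ImageNet logits.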
pub mod text_generation;
candle/candle-transformers/src/pipelines/mod.rs/0
{ "file_path": "candle/candle-transformers/src/pipelines/mod.rs", "repo_id": "candle", "token_count": 7 }
89