Dataset columns: text (string, lengths 7 to 328k), id (string, lengths 14 to 166), metadata (dict), __index_level_0__ (int64, values 0 to 471)
from .adabelief import AdaBelief from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .adamw import AdamW from .adan import Adan from .lamb import Lamb from .lars import Lars from .lookahead import Lookahead from .madgrad import MADGRAD from .nadam import Nadam from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from .lion import Lion from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs
pytorch-image-models/timm/optim/__init__.py/0
{ "file_path": "pytorch-image-models/timm/optim/__init__.py", "repo_id": "pytorch-image-models", "token_count": 170 }
200
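The row above re-exports timm's optimizer classes together with its factory helpers (`create_optimizer`, `create_optimizer_v2`, `optimizer_kwargs`). As a rough orientation only, a minimal sketch of driving the factory might look like the following (it assumes a recent `timm` release; the exact keyword set of `create_optimizer_v2` can differ between versions, and the tiny `nn.Linear` model is just a placeholder):

```python
# Minimal, hypothetical usage of the factory exported above.
import torch.nn as nn
from timm.optim import create_optimizer_v2

model = nn.Linear(16, 4)  # placeholder for a real vision model
optimizer = create_optimizer_v2(model, opt='adamw', lr=1e-3, weight_decay=0.05)
print(type(optimizer).__name__)  # expected to be an AdamW-style optimizer
```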
"""RAdam Optimizer. Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265 """ import math import torch from torch.optim.optimizer import Optimizer class RAdam(Optimizer): def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)]) super(RAdam, self).__init__(params, defaults) def __setstate__(self, state): super(RAdam, self).__setstate__(state) @torch.no_grad() def step(self, closure=None): loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.float() if grad.is_sparse: raise RuntimeError('RAdam does not support sparse gradients') p_fp32 = p.float() state = self.state[p] if len(state) == 0: state['step'] = 0 state['exp_avg'] = torch.zeros_like(p_fp32) state['exp_avg_sq'] = torch.zeros_like(p_fp32) else: state['exp_avg'] = state['exp_avg'].type_as(p_fp32) state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) state['step'] += 1 buffered = group['buffer'][int(state['step'] % 10)] if state['step'] == buffered[0]: num_sma, step_size = buffered[1], buffered[2] else: buffered[0] = state['step'] beta2_t = beta2 ** state['step'] num_sma_max = 2 / (1 - beta2) - 1 num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) buffered[1] = num_sma # more conservative since it's an approximated value if num_sma >= 5: step_size = group['lr'] * math.sqrt( (1 - beta2_t) * (num_sma - 4) / (num_sma_max - 4) * (num_sma - 2) / num_sma * num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) else: step_size = group['lr'] / (1 - beta1 ** state['step']) buffered[2] = step_size if group['weight_decay'] != 0: p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr']) # more conservative since it's an approximated value if num_sma >= 5: denom = exp_avg_sq.sqrt().add_(group['eps']) p_fp32.addcdiv_(exp_avg, denom, value=-step_size) else: p_fp32.add_(exp_avg, alpha=-step_size) p.copy_(p_fp32) return loss
pytorch-image-models/timm/optim/radam.py/0
{ "file_path": "pytorch-image-models/timm/optim/radam.py", "repo_id": "pytorch-image-models", "token_count": 1967 }
201
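The file above implements rectified Adam: it keeps the usual Adam moment estimates but only applies the adaptive denominator once the estimated simple moving average length (`num_sma`) is large enough, falling back to a plain momentum update otherwise. A hedged sketch of driving this class in an ordinary PyTorch step (everything except `RAdam` itself is an illustrative placeholder):

```python
# Hypothetical training step using the RAdam class defined above.
import torch
import torch.nn as nn
from timm.optim import RAdam

model = nn.Linear(8, 1)
optimizer = RAdam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=1e-2)

x, y = torch.randn(32, 8), torch.randn(32, 1)
optimizer.zero_grad()
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()  # runs the rectified update implemented in RAdam.step()
```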
import torch from timm.utils.agc import adaptive_clip_grad def dispatch_clip_grad(parameters, value: float, mode: str = 'norm', norm_type: float = 2.0): """ Dispatch to gradient clipping method Args: parameters (Iterable): model parameters to clip value (float): clipping value/factor/norm, mode dependent mode (str): clipping mode, one of 'norm', 'value', 'agc' norm_type (float): p-norm, default 2.0 """ if mode == 'norm': torch.nn.utils.clip_grad_norm_(parameters, value, norm_type=norm_type) elif mode == 'value': torch.nn.utils.clip_grad_value_(parameters, value) elif mode == 'agc': adaptive_clip_grad(parameters, value, norm_type=norm_type) else: assert False, f"Unknown clip mode ({mode})."
pytorch-image-models/timm/utils/clip_grad.py/0
{ "file_path": "pytorch-image-models/timm/utils/clip_grad.py", "repo_id": "pytorch-image-models", "token_count": 306 }
202
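`dispatch_clip_grad` simply routes to one of three clipping strategies: global-norm clipping, per-element value clipping, or adaptive gradient clipping (AGC). A sketch of where the call typically sits, between `backward()` and `optimizer.step()` (the surrounding loop is a placeholder, not part of the file):

```python
# Illustrative placement of dispatch_clip_grad in a training step.
import torch
import torch.nn as nn
from timm.utils.clip_grad import dispatch_clip_grad

model = nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x, y = torch.randn(16, 8), torch.randn(16, 1)
optimizer.zero_grad()
nn.functional.mse_loss(model(x), y).backward()
dispatch_clip_grad(model.parameters(), value=1.0, mode='norm')  # or 'value' / 'agc'
optimizer.step()
```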
aml target server/transformers server/flash-attention
text-generation-inference/.dockerignore/0
{ "file_path": "text-generation-inference/.dockerignore", "repo_id": "text-generation-inference", "token_count": 16 }
203
install-server: cd server && make install install-custom-kernels: if [ "$$BUILD_EXTENSIONS" = "True" ]; then cd server/custom_kernels && python setup.py install; else echo "Custom kernels are disabled, you need to set the BUILD_EXTENSIONS environment variable to 'True' in order to build them. (Please read the docs, kernels might not work on all hardware)"; fi install-integration-tests: cd integration-tests && pip install -r requirements.txt cd clients/python && pip install . install-router: cd router && cargo install --path . install-launcher: cd launcher && cargo install --path . install-benchmark: cd benchmark && cargo install --path . install: install-server install-router install-launcher install-custom-kernels server-dev: cd server && make run-dev router-dev: cd router && cargo run -- --port 8080 rust-tests: install-router install-launcher cargo test integration-tests: install-integration-tests pytest -s -vv -m "not private" integration-tests update-integration-tests: install-integration-tests pytest -s -vv --snapshot-update integration-tests python-server-tests: HF_HUB_ENABLE_HF_TRANSFER=1 pytest -s -vv -m "not private" server/tests python-client-tests: pytest clients/python/tests python-tests: python-server-tests python-client-tests run-falcon-7b-instruct: text-generation-launcher --model-id tiiuae/falcon-7b-instruct --port 8080 run-falcon-7b-instruct-quantize: text-generation-launcher --model-id tiiuae/falcon-7b-instruct --quantize bitsandbytes --port 8080 clean: rm -rf target aml
text-generation-inference/Makefile/0
{ "file_path": "text-generation-inference/Makefile", "repo_id": "text-generation-inference", "token_count": 498 }
204
# Serving Private & Gated Models If the model you wish to serve is behind gated access or the model repository on Hugging Face Hub is private, and you have access to the model, you can provide your Hugging Face Hub access token. You can generate and copy a read token from [Hugging Face Hub tokens page](https://huggingface.co/settings/tokens) If you're using the CLI, set the `HUGGING_FACE_HUB_TOKEN` environment variable. For example: ``` export HUGGING_FACE_HUB_TOKEN=<YOUR READ TOKEN> ``` If you would like to do it through Docker, you can provide your token by specifying `HUGGING_FACE_HUB_TOKEN` as shown below. ```bash model=meta-llama/Llama-2-7b-chat-hf volume=$PWD/data token=<your READ token> docker run --gpus all \ --shm-size 1g \ -e HUGGING_FACE_HUB_TOKEN=$token \ -p 8080:80 \ -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.4 \ --model-id $model ```
text-generation-inference/docs/source/basic_tutorials/gated_model_access.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/gated_model_access.md", "repo_id": "text-generation-inference", "token_count": 320 }
205
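The page above covers the CLI and `docker run` paths. For completeness, a rough docker-py equivalent of that `docker run` command is sketched below; it is an assumption-level convenience (the `docker` Python package, the image tag, and the model id are taken from the example or are placeholders, and keyword arguments can vary between docker-py versions):

```python
# Hypothetical docker-py equivalent of the `docker run` command above.
import os
import docker

client = docker.from_env()
container = client.containers.run(
    "ghcr.io/huggingface/text-generation-inference:1.4",
    command=["--model-id", "meta-llama/Llama-2-7b-chat-hf"],
    environment={"HUGGING_FACE_HUB_TOKEN": os.environ["HUGGING_FACE_HUB_TOKEN"]},
    ports={"80/tcp": 8080},
    volumes={f"{os.getcwd()}/data": {"bind": "/data", "mode": "rw"}},
    shm_size="1g",
    device_requests=[docker.types.DeviceRequest(count=-1, capabilities=[["gpu"]])],
    detach=True,
)
print(container.id)
```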
# Messages API Text Generation Inference (TGI) now supports the Messages API, which is fully compatible with the OpenAI Chat Completion API. This feature is available starting from version 1.4.0. You can use OpenAI's client libraries or third-party libraries expecting OpenAI schema to interact with TGI's Messages API. Below are some examples of how to utilize this compatibility. > **Note:** The Messages API is supported from TGI version 1.4.0 and above. Ensure you are using a compatible version to access this feature. #### Table of Contents - [Making a Request](#making-a-request) - [Streaming](#streaming) - [Synchronous](#synchronous) - [Hugging Face Inference Endpoints](#hugging-face-inference-endpoints) - [Cloud Providers](#cloud-providers) - [Amazon SageMaker](#amazon-sagemaker) ## Making a Request You can make a request to TGI's Messages API using `curl`. Here's an example: ```bash curl localhost:3000/v1/chat/completions \ -X POST \ -d '{ "model": "tgi", "messages": [ { "role": "system", "content": "You are a helpful assistant." }, { "role": "user", "content": "What is deep learning?" } ], "stream": true, "max_tokens": 20 }' \ -H 'Content-Type: application/json' ``` ## Streaming You can also use OpenAI's Python client library to make a streaming request. Here's how: ```python from openai import OpenAI # init the client but point it to TGI client = OpenAI( base_url="http://localhost:3000/v1", api_key="-" ) chat_completion = client.chat.completions.create( model="tgi", messages=[ {"role": "system", "content": "You are a helpful assistant." }, {"role": "user", "content": "What is deep learning?"} ], stream=True ) # iterate and print stream for message in chat_completion: print(message) ``` ## Synchronous If you prefer to make a synchronous request, you can do so like this: ```python from openai import OpenAI # init the client but point it to TGI client = OpenAI( base_url="http://localhost:3000/v1", api_key="-" ) chat_completion = client.chat.completions.create( model="tgi", messages=[ {"role": "system", "content": "You are a helpful assistant." }, {"role": "user", "content": "What is deep learning?"} ], stream=False ) print(chat_completion) ``` ## Hugging Face Inference Endpoints The Messages API is integrated with [Inference Endpoints](https://huggingface.co/inference-endpoints/dedicated). Every endpoint that uses "Text Generation Inference" with an LLM, which has a chat template can now be used. Below is an example of how to use IE with TGI using OpenAI's Python client library: > **Note:** Make sure to replace `base_url` with your endpoint URL and to include `v1/` at the end of the URL. The `api_key` should be replaced with your Hugging Face API key. ```python from openai import OpenAI # init the client but point it to TGI client = OpenAI( # replace with your endpoint url, make sure to include "v1/" at the end base_url="https://vlzz10eq3fol3429.us-east-1.aws.endpoints.huggingface.cloud/v1/", # replace with your API key api_key="hf_XXX" ) chat_completion = client.chat.completions.create( model="tgi", messages=[ {"role": "system", "content": "You are a helpful assistant." }, {"role": "user", "content": "What is deep learning?"} ], stream=True ) # iterate and print stream for message in chat_completion: print(message.choices[0].delta.content, end="") ``` ## Cloud Providers TGI can be deployed on various cloud providers for scalable and robust text generation. One such provider is Amazon SageMaker, which has recently added support for TGI. 
Here's how you can deploy TGI on Amazon SageMaker: ## Amazon SageMaker To enable the Messages API in Amazon SageMaker, you need to set the environment variable `MESSAGES_API_ENABLED=true`. This will modify the `/invocations` route to accept Messages dictionaries consisting of role and content. See the example below on how to deploy Llama with the new Messages API. ```python import json import sagemaker import boto3 from sagemaker.huggingface import HuggingFaceModel, get_huggingface_llm_image_uri try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] # Hub Model configuration. https://huggingface.co/models hub = { 'HF_MODEL_ID':'HuggingFaceH4/zephyr-7b-beta', 'SM_NUM_GPUS': json.dumps(1), 'MESSAGES_API_ENABLED': True } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( image_uri=get_huggingface_llm_image_uri("huggingface",version="1.4.0"), env=hub, role=role, ) # deploy model to SageMaker Inference predictor = huggingface_model.deploy( initial_instance_count=1, instance_type="ml.g5.2xlarge", container_startup_health_check_timeout=300, ) # send request predictor.predict({ "messages": [ {"role": "system", "content": "You are a helpful assistant." }, {"role": "user", "content": "What is deep learning?"} ] }) ```
text-generation-inference/docs/source/messages_api.md/0
{ "file_path": "text-generation-inference/docs/source/messages_api.md", "repo_id": "text-generation-inference", "token_count": 1731 }
206
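The document above demonstrates the Messages API with curl and the OpenAI client. Since the route is plain OpenAI-compatible HTTP, a bare `requests` call works too; the sketch below mirrors the curl payload (it assumes TGI is listening on localhost:3000, as in the doc, and is not part of the original page):

```python
# Plain-requests, non-streaming version of the curl example above.
import requests

payload = {
    "model": "tgi",
    "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is deep learning?"},
    ],
    "stream": False,
    "max_tokens": 20,
}
resp = requests.post("http://localhost:3000/v1/chat/completions", json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```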
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 330, "logprob": null, "text": "ir" }, { "id": 1622, "logprob": -7.8125, "text": "af" }, { "id": 249, "logprob": -4.5, "text": "at" }, { "id": 1480, "logprob": -10.875, "text": "ron" }, { "id": 37, "logprob": -3.6875, "text": ":" } ], "seed": 0, "tokens": [ { "id": 836, "logprob": -1.265625, "special": false, "text": " i" }, { "id": 18, "logprob": -0.119628906, "special": false, "text": "'" }, { "id": 298, "logprob": -2.265625, "special": false, "text": "ve" }, { "id": 650, "logprob": -0.49804688, "special": false, "text": " been" }, { "id": 1241, "logprob": 0.0, "special": false, "text": " using" }, { "id": 334, "logprob": 0.0, "special": false, "text": " it" }, { "id": 312, "logprob": -1.2421875, "special": false, "text": " for" }, { "id": 909, "logprob": -0.99609375, "special": false, "text": " years" }, { "id": 193, "logprob": -0.30273438, "special": false, "text": "\n" }, { "id": 807, "logprob": -1.078125, "special": false, "text": "ik" } ] }, "generated_text": "Girafatron is obsessed with giraffes, the most glorious animal on the face of this Earth. Giraftron believes all other animals are irrelevant when compared to the glorious majesty of the giraffe.\nDaniel: Hello, Girafatron!\nGirafatron: i've been using it for years\nik" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon_all_params.json", "repo_id": "text-generation-inference", "token_count": 1204 }
207
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 338, "logprob": -10.0078125, "text": "is" }, { "id": 21784, "logprob": -15.515625, "text": "Deep" }, { "id": 29257, "logprob": -2.8847656, "text": "Learning" }, { "id": 29973, "logprob": -4.140625, "text": "?" } ], "seed": 0, "tokens": [ { "id": 13, "logprob": -1.1582031, "special": false, "text": "\n" }, { "id": 2772, "logprob": -0.23083496, "special": false, "text": "De" }, { "id": 1022, "logprob": 0.0, "special": false, "text": "ep" }, { "id": 6509, "logprob": 0.0, "special": false, "text": " learning" }, { "id": 29892, "logprob": -0.61816406, "special": false, "text": "," }, { "id": 607, "logprob": -0.7089844, "special": false, "text": " which" }, { "id": 508, "logprob": -1.7724609, "special": false, "text": " can" }, { "id": 367, "logprob": 0.0, "special": false, "text": " be" }, { "id": 5545, "logprob": 0.0, "special": false, "text": " considered" }, { "id": 408, "logprob": -0.3869629, "special": false, "text": " as" } ] }, "generated_text": "What is Deep Learning?\nDeep learning, which can be considered as" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_medusa/test_flash_medusa_all_params.json", "repo_id": "text-generation-inference", "token_count": 1153 }
208
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21447754, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.43701172, "special": false, "text": " print" }, { "id": 372, "logprob": -0.5361328, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2412109, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.7583008, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.20837402, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2470703, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json", "repo_id": "text-generation-inference", "token_count": 1111 }
209
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.83984375, "text": " is" }, { "id": 18147, "logprob": -12.8125, "text": " Deep" }, { "id": 20727, "logprob": -2.84375, "text": " Learning" }, { "id": 32, "logprob": -1.25, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.37890625, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.4296875, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.078125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.515625, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.6015625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.65625, "special": false, "text": " a" }, { "id": 747, "logprob": -2.109375, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.328125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0032653809, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.28125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json", "repo_id": "text-generation-inference", "token_count": 5458 }
210
{ "choices": [ { "delta": { "content": null, "role": "assistant", "tool_calls": { "function": { "arguments": "</s>", "name": null }, "id": "", "index": 0, "type": "function" } }, "finish_reason": "eos_token", "index": 0, "logprobs": null } ], "created": 1710795499, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "1.4.5-native" }
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_stream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_stream.json", "repo_id": "text-generation-inference", "token_count": 319 }
211
import pytest @pytest.fixture(scope="module") def flash_santacoder_handle(launcher): with launcher("bigcode/santacoder") as handle: yield handle @pytest.fixture(scope="module") async def flash_santacoder(flash_santacoder_handle): await flash_santacoder_handle.health(300) return flash_santacoder_handle.client @pytest.mark.asyncio async def test_flash_santacoder(flash_santacoder, response_snapshot): response = await flash_santacoder.generate( "def print_hello", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_santacoder_load( flash_santacoder, generate_load, response_snapshot ): responses = await generate_load( flash_santacoder, "def print_hello", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_santacoder.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_santacoder.py", "repo_id": "text-generation-inference", "token_count": 387 }
212
//! Text Generation gRPC client library mod client; #[allow(clippy::derive_partial_eq_without_eq)] mod pb; mod sharded_client; pub use client::Client; pub use pb::generate::v2::HealthResponse; pub use pb::generate::v2::InfoResponse as ShardInfo; pub use pb::generate::v2::{ Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType, NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens, }; pub use sharded_client::ShardedClient; use thiserror::Error; use tonic::transport; use tonic::Status; #[derive(Error, Debug, Clone)] pub enum ClientError { #[error("Could not connect to Text Generation server: {0}")] Connection(String), #[error("Server error: {0}")] Generation(String), #[error("Sharded results are empty")] EmptyResults, } impl From<Status> for ClientError { fn from(err: Status) -> Self { let err = Self::Generation(err.message().to_string()); tracing::error!("{err}"); err } } impl From<transport::Error> for ClientError { fn from(err: transport::Error) -> Self { let err = Self::Connection(err.to_string()); tracing::error!("{err}"); err } } pub type Result<T> = std::result::Result<T, ClientError>;
text-generation-inference/router/client/src/lib.rs/0
{ "file_path": "text-generation-inference/router/client/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 464 }
213
# Fork that adds only the correct stream to this kernel in order # to make cuda graphs work. awq_commit := bd1dc2d5254345cc76ab71894651fb821275bdd4 awq: rm -rf llm-awq git clone https://github.com/huggingface/llm-awq build-awq: awq cd llm-awq/ && git fetch && git checkout $(awq_commit) cd llm-awq/awq/kernels && python setup.py build install-awq: build-awq pip uninstall awq_inference_engine -y || true cd llm-awq/awq/kernels && python setup.py install
text-generation-inference/server/Makefile-awq/0
{ "file_path": "text-generation-inference/server/Makefile-awq", "repo_id": "text-generation-inference", "token_count": 183 }
214
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _q4_matmul_cuh #define _q4_matmul_cuh #include <cuda_runtime.h> #include <cuda_fp16.h> #include <cstdint> #include <cstdio> #include <ATen/cuda/CUDAContext.h> #include "q4_matrix.cuh" #include "../tuning.h" void q4_matmul_cuda ( ExLlamaTuning* tuningParams, const half* x, const int x_height, const Q4Matrix* w, half* out, bool no_zero, cudaStream_t alt_stream ); void q4_matmul_recons_cuda ( ExLlamaTuning* tuningParams, const half* x, const int x_height, Q4Matrix* w, half* out, bool no_zero, const cublasHandle_t handle ); #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh", "repo_id": "text-generation-inference", "token_count": 322 }
215
#include "compat.cuh" __forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return __hadd2(result, g_result); } __forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return __half2float(__low2half(result)) + __half2float(__high2half(result)); } __forceinline__ __device__ half2 dot22_8_h2(half2(&dq)[4], const half* a_ptr) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return result; } typedef void (*fp_gemm_half_q_half_gptq_kernel) ( const half*, const uint32_t*, const uint32_t*, const half*, half*, const int, const int, const int, const int, const int, const uint16_t*, const int, const bool, const half*, const int ); template <int m_count, bool use_r_weights, bool mul_r_weights> __global__ void gemm_half_q_half_gptq_kernel ( const half* __restrict__ a, const uint32_t* __restrict__ b_q_weight, const uint32_t* __restrict__ b_gptq_qzeros, const half* __restrict__ b_gptq_scales, half* __restrict__ c, const int size_m, const int size_n, const int size_k, const int groups, const int groupsize, const uint16_t* __restrict__ b_q_perm, const int rows_4, const bool clear, const half* r_weights, const int r_weights_stride ) { MatrixView_half a_(a, size_m, size_k); MatrixView_half_rw c_(c, size_m, size_n); MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n); MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n); int t = threadIdx.x; // Block int offset_n = blockIdx.x * GPTQ_BLOCK_KN_SIZE * 4; int offset_m = blockIdx.y * m_count; int offset_k = blockIdx.z * GPTQ_BLOCK_KN_SIZE; int end_n = min(offset_n + GPTQ_BLOCK_KN_SIZE * 4, size_n); int end_m = min(offset_m + m_count, size_m); int end_k = min(offset_k + GPTQ_BLOCK_KN_SIZE, size_k); int n = offset_n + t * 4; // Read weights half_uint16 weights[MAX_Q_GEMM_WEIGHTS]; if constexpr (use_r_weights) { uint16_t any_w = 0; const half* w_ptr = r_weights; for (int m = 0; m < m_count; ++m) { weights[m].as_half = *w_ptr; w_ptr += r_weights_stride; any_w |= weights[m].as_uint16; } if (!any_w) return; // Early exit if all weights are zero -- does not zero output (!!!) 
} // Preload block_a __shared__ half block_a[m_count][GPTQ_BLOCK_KN_SIZE]; if (offset_k + t < end_k) { for (int m = 0; m < m_count; ++m) { const half* a_ptr = a_.item_ptr(offset_m + m, 0); half* block_a_ptr = block_a[m]; half a0; if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]]; else a0 = a_ptr[offset_k + t]; block_a_ptr[t] = a0; } } // Zero output if (n >= size_n) return; if (clear && blockIdx.z == 0) // && (threadIdx.x & 1) == 0) { for (int m = 0; m < m_count; m++) *((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0; } __syncthreads(); // Find initial group int group = offset_k / groupsize; int nextgroup = offset_k + groupsize; // a, b offset int qk = offset_k / (32 / 4); const uint32_t* b_ptr = b_q_weight + qk * size_n + n; const half* a_ptr = &block_a[0][0]; int a_stride = GPTQ_BLOCK_KN_SIZE; // Initial group int zeros[4]; half2 scales[4]; half2 z1z16[4][2]; half2 y1y16[4][2]; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); // __syncthreads(); // Column result half2 block_c[m_count][4] = {}; // Dequantize and multiply int k = offset_k; while (k < end_k) { if (k == nextgroup) { group++; nextgroup += groupsize; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); } #pragma unroll for (int j = 0; j < 4; j++) { const int4* b_ptr4 = (int4*) b_ptr; int4 load_int4 = *b_ptr4; half2 dq[4][4]; dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false); dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false); dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false); dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false); #pragma unroll for (int m = 0; m < m_count; m++) { if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } block_c[m][0] = __hfma2(dot22_8_h2(dq[0], a_ptr + m * a_stride), scales[0], block_c[m][0]); block_c[m][1] = __hfma2(dot22_8_h2(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]); block_c[m][2] = __hfma2(dot22_8_h2(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]); block_c[m][3] = __hfma2(dot22_8_h2(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]); } b_ptr += size_n; a_ptr += 8; } k += 32; } for (int m = 0; m < m_count; m++) { half2 *out = (half2*) c_.item_ptr(offset_m + m, n); half result0 = __hadd(__low2half(block_c[m][0]), __high2half(block_c[m][0])); half result1 = __hadd(__low2half(block_c[m][1]), __high2half(block_c[m][1])); half result2 = __hadd(__low2half(block_c[m][2]), __high2half(block_c[m][2])); half result3 = __hadd(__low2half(block_c[m][3]), __high2half(block_c[m][3])); half2 result01 = __halves2half2(result0, result1); half2 result23 = __halves2half2(result2, result3); if constexpr (mul_r_weights) { half2 w_mul2 = __half2half2(weights[m].as_half); result01 = __hmul2(result01, w_mul2); result23 = __hmul2(result23, w_mul2); } atomicAdd(out , result01); atomicAdd(out + 1, result23); } } template <bool use_r_weights, bool mul_r_weights> struct map_m_count_gptq { 
static constexpr fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(int m_count) { #if GPTQ_BLOCK_M_SIZE_MAX >= 1 if (m_count == 1) return gemm_half_q_half_gptq_kernel<1, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 2 if (m_count == 2) return gemm_half_q_half_gptq_kernel<2, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 3 if (m_count == 3) return gemm_half_q_half_gptq_kernel<3, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 4 if (m_count == 4) return gemm_half_q_half_gptq_kernel<4, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 5 if (m_count == 5) return gemm_half_q_half_gptq_kernel<5, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 6 if (m_count == 6) return gemm_half_q_half_gptq_kernel<6, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 7 if (m_count == 7) return gemm_half_q_half_gptq_kernel<7, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 8 if (m_count == 8) return gemm_half_q_half_gptq_kernel<8, use_r_weights, mul_r_weights>; #endif return NULL; } }; fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(const int m_count, bool r_weights, bool mul_r_weights) { if (!r_weights && !mul_r_weights) return map_m_count_gptq<false, false>::pick_gemm_half_q_half_gptq_kernel(m_count); if (!r_weights && mul_r_weights) return map_m_count_gptq<false, true>::pick_gemm_half_q_half_gptq_kernel(m_count); if ( r_weights && !mul_r_weights) return map_m_count_gptq< true, false>::pick_gemm_half_q_half_gptq_kernel(m_count); if ( r_weights && mul_r_weights) return map_m_count_gptq< true, true>::pick_gemm_half_q_half_gptq_kernel(m_count); return NULL; }
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh", "repo_id": "text-generation-inference", "token_count": 4839 }
216
import torch from loguru import logger from transformers.configuration_utils import PretrainedConfig from transformers.models.auto import modeling_auto from huggingface_hub import hf_hub_download from typing import Optional from pathlib import Path from text_generation_server.utils.speculate import get_speculate, set_speculate from text_generation_server.models.model import Model from text_generation_server.models.causal_lm import CausalLM from text_generation_server.models.flash_causal_lm import FlashCausalLM from text_generation_server.models.bloom import BLOOMSharded from text_generation_server.models.mpt import MPTSharded from text_generation_server.models.seq2seq_lm import Seq2SeqLM from text_generation_server.models.rw import RW from text_generation_server.models.opt import OPTSharded from text_generation_server.models.galactica import GalacticaSharded from text_generation_server.models.santacoder import SantaCoder from text_generation_server.models.t5 import T5Sharded from text_generation_server.models.gpt_neox import GPTNeoxSharded from text_generation_server.models.phi import Phi # The flag below controls whether to allow TF32 on matmul. This flag defaults to False # in PyTorch 1.12 and later. torch.backends.cuda.matmul.allow_tf32 = True # The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True. torch.backends.cudnn.allow_tf32 = True # Disable gradients torch.set_grad_enabled(False) __all__ = [ "Model", "BLOOMSharded", "CausalLM", "GalacticaSharded", "Seq2SeqLM", "SantaCoder", "OPTSharded", "T5Sharded", "get_model", ] FLASH_ATT_ERROR_MESSAGE = "{} requires Flash Attention enabled models." FLASH_ATTENTION = True try: from text_generation_server.models.flash_rw import FlashRWSharded from text_generation_server.models.flash_neox import FlashNeoXSharded from text_generation_server.models.flash_llama import ( FlashLlama, ) from text_generation_server.models.flash_qwen2 import ( FlashQwen2, ) from text_generation_server.models.flash_cohere import ( FlashCohere, ) from text_generation_server.models.flash_gemma import ( FlashGemma, ) from text_generation_server.models.flash_santacoder import ( FlashSantacoderSharded, ) from text_generation_server.models.idefics import IDEFICSSharded from text_generation_server.models.flash_mistral import FlashMistral from text_generation_server.models.flash_mixtral import FlashMixtral from text_generation_server.models.flash_phi import FlashPhi from text_generation_server.models.flash_starcoder2 import FlashStarcoder2 from text_generation_server.models.flash_dbrx import FlashDbrx from text_generation_server.utils.flash_attn import HAS_FLASH_ATTN_V2_CUDA except ImportError as e: logger.warning(f"Could not import Flash Attention enabled models: {e}") FLASH_ATTENTION = False HAS_FLASH_ATTN_V2_CUDA = False if FLASH_ATTENTION: __all__.append(FlashNeoXSharded) __all__.append(FlashRWSharded) __all__.append(FlashSantacoderSharded) __all__.append(FlashLlama) __all__.append(IDEFICSSharded) __all__.append(FlashMistral) __all__.append(FlashMixtral) __all__.append(FlashDbrx) __all__.append(FlashPhi) __all__.append(FlashQwen2) __all__.append(FlashStarcoder2) __all__.append(FlashGemma) __all__.append(FlashCohere) MAMBA_AVAILABLE = True try: from text_generation_server.models.mamba import Mamba except ImportError as e: logger.warning(f"Could not import Mamba: {e}") MAMBA_AVAILABLE = False if MAMBA_AVAILABLE: __all__.append(Mamba) def get_model( model_id: str, revision: Optional[str], sharded: bool, quantize: Optional[str], speculate: 
Optional[int], dtype: Optional[str], trust_remote_code: bool, ) -> Model: if dtype is None: # Keep it as default for now and let # every model resolve their own default dtype. dtype = None elif dtype == "float16": dtype = torch.float16 elif dtype == "bfloat16": dtype = torch.bfloat16 else: raise RuntimeError(f"Unknown dtype {dtype}") if speculate is not None: set_speculate(speculate) else: set_speculate(0) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) use_medusa = None if "medusa_num_heads" in config_dict: medusa_model_id = model_id medusa_revision = revision model_id = config_dict["base_model_name_or_path"] revision = "main" speculate_medusa = config_dict["medusa_num_heads"] if speculate is not None: if speculate > speculate_medusa: raise RuntimeError( "Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, please make them match" ) else: set_speculate(speculate) else: set_speculate(speculate_medusa) config_dict, _ = PretrainedConfig.get_config_dict( model_id, revision=revision, trust_remote_code=trust_remote_code ) is_local = Path(medusa_model_id).exists() if not is_local: medusa_config = hf_hub_download( medusa_model_id, revision=medusa_revision, filename="config.json" ) hf_hub_download( medusa_model_id, revision=medusa_revision, filename="medusa_lm_head.safetensors", ) use_medusa = Path(medusa_config).parent else: use_medusa = Path(medusa_model_id) method = "medusa" else: method = "n-gram" speculate = get_speculate() if speculate > 0: logger.info(f"Using speculation {method} with {speculate} input ids.") model_type = config_dict.get("model_type", None) if model_type is None: # TODO: fix how we determine model type for Mamba if "ssm_cfg" in config_dict: # *only happens in Mamba case model_type = "ssm" else: raise RuntimeError( f"Could not determine model type for {model_id} revision {revision}" ) if model_type == "ssm": return Mamba( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_id.startswith("facebook/galactica"): return GalacticaSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if ( model_type == "gpt_bigcode" or model_type == "gpt2" and model_id.startswith("bigcode/") ): if FLASH_ATTENTION: return FlashSantacoderSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Santacoder") ) else: return SantaCoder( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "bloom": return BLOOMSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "mpt": return MPTSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "gpt_neox": if FLASH_ATTENTION: return FlashNeoXSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: return GPTNeoxSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, 
trust_remote_code=trust_remote_code, ) elif model_type == "phi": if FLASH_ATTENTION: return FlashPhi( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "phi-msft": if FLASH_ATTENTION: raise NotImplementedError( "Legacy phi-msft is not supported with Flash Attention" ) else: return Phi( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif model_type == "llama" or model_type == "baichuan": if FLASH_ATTENTION: return FlashLlama( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Llama")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "gemma": if FLASH_ATTENTION: return FlashGemma( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "cohere": if FLASH_ATTENTION: return FlashCohere( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Cohere")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "dbrx": if FLASH_ATTENTION: return FlashDbrx( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded DBRX")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type in ["RefinedWeb", "RefinedWebModel", "falcon"]: if sharded: if FLASH_ATTENTION: if config_dict.get("alibi", False): raise NotImplementedError("sharded is not supported for this model") return FlashRWSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format(f"Sharded Falcon")) else: if FLASH_ATTENTION and not config_dict.get("alibi", False): return FlashRWSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: return RW( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "mistral": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashMistral( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mistral")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, 
trust_remote_code=trust_remote_code, ) if model_type == "mixtral": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashMixtral( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mixtral")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "starcoder2": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashStarcoder2( model_id, revision, quantize=quantize, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError( FLASH_ATT_ERROR_MESSAGE.format("Sharded Starcoder2") ) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "qwen2": sliding_window = config_dict.get("sliding_window", -1) if ( (sliding_window is None or sliding_window == -1) and FLASH_ATTENTION ) or HAS_FLASH_ATTN_V2_CUDA: return FlashQwen2( model_id, revision, quantize=quantize, dtype=dtype, trust_remote_code=trust_remote_code, ) elif sharded: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Qwen2")) else: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "opt": return OPTSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "t5": return T5Sharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type == "idefics": if FLASH_ATTENTION: return IDEFICSSharded( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) else: raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics")) if sharded: raise NotImplementedError("sharded is not supported for AutoModel") if quantize == "gptq": raise NotImplementedError( "gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`" ) if quantize == "awq": raise NotImplementedError("awq quantization is not supported for AutoModel") elif (quantize == "bitsandbytes-fp4") or (quantize == "bitsandbytes-nf4"): raise NotImplementedError("4bit quantization is not supported for AutoModel") elif quantize == "eetq": raise NotImplementedError("Eetq quantization is not supported for AutoModel") if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES: return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES: return Seq2SeqLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) auto_map = config_dict.get("auto_map", None) if trust_remote_code and auto_map is not None: if "AutoModelForCausalLM" in auto_map.keys(): return CausalLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) if "AutoModelForSeq2SeqLM" 
in auto_map.keys(): return Seq2SeqLM( model_id, revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, ) raise ValueError(f"Unsupported model type {model_type}")
text-generation-inference/server/text_generation_server/models/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/__init__.py", "repo_id": "text-generation-inference", "token_count": 10989 }
217
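`get_model` is the dispatcher that inspects the Hub config's `model_type` (plus quantization, sharding, and speculation settings) and returns the matching `Model` subclass. A hedged sketch of a direct call, using the signature shown above (in the real server the launcher supplies these arguments, and running it requires the corresponding GPU kernels to be installed):

```python
# Illustrative direct call into the dispatcher defined above; values are placeholders.
from text_generation_server.models import get_model

model = get_model(
    model_id="bigcode/santacoder",
    revision=None,
    sharded=False,
    quantize=None,
    speculate=None,
    dtype="float16",
    trust_remote_code=False,
)
print(type(model).__name__)  # e.g. FlashSantacoderSharded when Flash Attention is available
```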
import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, SpeculativeHead, TensorParallelEmbedding, FastLayerNorm, get_linear, ) def load_multi_mqa( config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size ): if config.quantize == "gptq": return _load_multi_mqa_gptq( config, prefix, weights, bias, head_size, num_heads, hidden_size ) else: return _load_multi_mqa( config, prefix, weights, bias, head_size, num_heads, hidden_size ) def _load_multi_mqa_gptq( config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size ): if any("c_attn" in k for k in weights.routing.keys()) and not config.transpose: world_size = weights.process_group.size() rank = weights.process_group.rank() slice_ = weights._get_slice(f"{prefix}.c_attn.qweight") shape = slice_.get_shape() block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size :] qweight = torch.cat([q_tensor, kv_tensor], dim=1) qweight = qweight.to(device=weights.device) slice_ = weights._get_slice(f"{prefix}.c_attn.scales") shape = slice_.get_shape() block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size :] scales = torch.cat([q_tensor, kv_tensor], dim=1) scales = scales.to(device=weights.device) slice_ = weights._get_slice(f"{prefix}.c_attn.qzeros") shape = slice_.get_shape() block_size = (shape[1] - (2 * head_size) * 4 // 32) // world_size start = rank * block_size stop = (rank + 1) * block_size assert 2 * head_size % (32 // 4) == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size * 4 // 32 :] qzeros = torch.cat([q_tensor, kv_tensor], dim=1) qzeros = qzeros.to(device=weights.device) ( bits, groupsize, _, quant_method, ) = weights._get_gptq_params() if quant_method == "gptq": g_idx = weights.get_tensor(f"{prefix}.c_attn.g_idx") g_idx = g_idx.to(device=weights.device) elif quant_method == "awq": g_idx = None from text_generation_server.utils.awq.conversion_utils import ( fast_awq_to_gptq, ) qweight, qzeros = fast_awq_to_gptq(qweight, qzeros) from text_generation_server.utils.layers import HAS_EXLLAMA use_exllama = HAS_EXLLAMA weight = (qweight, qzeros, scales, g_idx, bits, groupsize, use_exllama) if bias: slice_ = weights._get_slice(f"{prefix}.c_attn.bias") shape = slice_.get_shape() block_size = (shape[0] - 2 * head_size) // world_size assert (shape[0] - 2 * head_size) % world_size == 0 q_tensor = slice_[start:stop] start = rank * block_size stop = (rank + 1) * block_size q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size :] bias = torch.cat([q_tensor, kv_tensor], dim=0) bias = bias.to(device=weights.device) return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize)) else: raise NotImplementedError("Gptq loading with santacoder is not implemented") def _load_multi_mqa( config, prefix: str, weights, bias: bool, head_size, num_heads, hidden_size ): if any("c_attn" in k for k in weights.routing.keys()): slice_ = weights._get_slice(f"{prefix}.c_attn.weight") shape = 
slice_.get_shape() world_size = weights.process_group.size() rank = weights.process_group.rank() if config.transpose: block_size = (shape[1] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[1] - 2 * head_size) % world_size == 0 q_tensor = slice_[:, start:stop] kv_tensor = slice_[:, -2 * head_size :] weight = torch.cat([q_tensor, kv_tensor], dim=1).T else: block_size = (shape[0] - 2 * head_size) // world_size start = rank * block_size stop = (rank + 1) * block_size assert (shape[0] - 2 * head_size) % world_size == 0 q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size :] weight = torch.cat([q_tensor, kv_tensor], dim=0) if bias: slice_ = weights._get_slice(f"{prefix}.c_attn.bias") shape = slice_.get_shape() block_size = (shape[0] - 2 * head_size) // world_size assert (shape[0] - 2 * head_size) % world_size == 0 start = rank * block_size stop = (rank + 1) * block_size q_tensor = slice_[start:stop] kv_tensor = slice_[-2 * head_size :] bias = torch.cat([q_tensor, kv_tensor], dim=0) else: if config.transpose: w = [ weights.get_sharded(f"{prefix}.q_attn.weight", dim=1).T, weights.get_tensor(f"{prefix}.kv_attn.weight").T, ] weight = torch.cat(w, dim=0) else: w = [ weights.get_sharded(f"{prefix}.q_attn.weight", dim=0), weights.get_tensor(f"{prefix}.kv_attn.weight"), ] weight = torch.cat(w, dim=1) if bias: b = [ weights.get_sharded(f"{prefix}.q_attn.bias", dim=0), weights.get_tensor(f"{prefix}.kv_attn.bias"), ] bias = torch.cat(b, dim=0) else: bias = None weight = weight.to(dtype=weights.dtype).to(device=weights.device) assert list(weight.shape) == [ (num_heads + 2) * head_size, hidden_size, ], f"{weight.shape} != {[(num_heads + 2) * head_size, hidden_size]}" if bias is not None: bias = bias.to(dtype=weights.dtype).to(device=weights.device) assert list(bias.shape) == [ (num_heads + 2) * head_size ], f"{weight.shape} != {[(num_heads + 2) * head_size]}" return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize)) def load_col(config, prefix: str, weights, bias: bool): if config.transpose: weight = weights.get_sharded(f"{prefix}.weight", dim=1).T else: weight = weights.get_multi_weights_col( [prefix], quantize=config.quantize, dim=0 ) if bias: bias = weights.get_sharded(f"{prefix}.bias", dim=0) else: bias = None return TensorParallelColumnLinear(get_linear(weight, bias, config.quantize)) def load_row(config, prefix: str, weights, bias: bool): if config.transpose: weight = weights.get_sharded(f"{prefix}.weight", dim=0).T else: weight = weights.get_multi_weights_row(prefix, quantize=config.quantize) if bias and weights.process_group.rank() == 0: # Rank is only on the first rank process bias = weights.get_tensor(f"{prefix}.bias") else: bias = None return TensorParallelRowLinear( get_linear(weight, bias, config.quantize), process_group=weights.process_group ) class FlashMQAttention(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() num_heads = config.num_attention_heads hidden_size = config.hidden_size self.num_heads = num_heads self.hidden_size = hidden_size self.head_size = hidden_size // num_heads if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.softmax_scale = self.head_size ** (-0.5) self.c_attn = load_multi_mqa( config, prefix=prefix, weights=weights, bias=True, 
head_size=self.head_size, hidden_size=hidden_size, num_heads=self.num_heads, ) self.c_proj = load_row( config, prefix=f"{prefix}.c_proj", weights=weights, bias=True ) self.kv_head_mapping = torch.zeros( self.num_heads, dtype=torch.int32, device=weights.device ) def forward( self, hidden_states, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): qkv = self.c_attn(hidden_states) # Split query from key_value query, key_value = qkv.split( [self.head_size * self.num_heads, 2 * self.head_size], dim=1 ) # Prepare query and key_value for indexing query = query.view(-1, self.num_heads, self.head_size) key_value = key_value.view(-1, 2, 1, self.head_size) paged_attention.reshape_and_cache( key_value[:, 0], key_value[:, 1], kv_cache[0], kv_cache[1], slots ) # output attn_output = torch.empty_like(query) # Prefill if cu_seqlen_prefill is not None: # flash attention flash_attn.attention( query, torch.select(key_value, dim=1, index=0), torch.select(key_value, dim=1, index=1), attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, ) # Decode else: paged_attention.attention( attn_output, query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.c_proj(attn_output.view(-1, self.num_heads * self.head_size)) class MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.activation_function self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) self.c_fc = load_col( config, prefix=f"{prefix}.c_fc", weights=weights, bias=True ) self.c_proj = load_row( config, prefix=f"{prefix}.c_proj", weights=weights, bias=True ) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) return hidden_states class Block(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() prefix = f"transformer.h.{layer_id}" self.ln_1 = FastLayerNorm.load( prefix=f"{prefix}.ln_1", weights=weights, eps=config.layer_norm_epsilon ) self.ln_2 = FastLayerNorm.load( prefix=f"{prefix}.ln_2", weights=weights, eps=config.layer_norm_epsilon ) self.attn = FlashMQAttention( prefix=f"{prefix}.attn", config=config, weights=weights, ) self.mlp = MLP( prefix=f"{prefix}.mlp", config=config, weights=weights, ) def forward( self, hidden_states, residual, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): hidden_states, residual = self.ln_1(hidden_states, residual) hidden_states = self.attn( hidden_states, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) hidden_states, residual = self.ln_2(hidden_states, residual) mlp_output = self.mlp(hidden_states) return mlp_output, residual class FlashSantacoderModel(nn.Module): def __init__(self, config, weights): super().__init__() self.config = config self.process_group = weights.process_group self.wte = TensorParallelEmbedding( prefix="transformer.wte", weights=weights, reduce=False, ) self.wpe = TensorParallelEmbedding( prefix="transformer.wpe", weights=weights, reduce=False, ) self.h = nn.ModuleList( [ Block( layer_id, config, weights, ) for layer_id in range(config.num_hidden_layers) ] ) self.ln_f = FastLayerNorm.load( prefix="transformer.ln_f", weights=weights, eps=config.layer_norm_epsilon ) self.head_size = self.h[0].attn.head_size self.num_heads = self.h[0].attn.num_heads def forward( self, 
input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, ) -> torch.Tensor: hidden_states = self.wte(input_ids) + self.wpe(position_ids) if self.process_group.size() > 1: torch.distributed.all_reduce(hidden_states, group=self.process_group) residual = None for i, layer in enumerate(self.h): hidden_states, residual = layer( hidden_states, residual, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, ) hidden_states, _ = self.ln_f(hidden_states, residual) return hidden_states class FlashSantacoderForCausalLM(nn.Module): def __init__(self, config, weights): super().__init__() self.transformer = FlashSantacoderModel(config, weights) self.lm_head = SpeculativeHead.load( config, prefix="transformer.wte", weights=weights ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.transformer( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_santacoder_modeling.py", "repo_id": "text-generation-inference", "token_count": 8199 }
218
import torch import torch.distributed from opentelemetry import trace from typing import Optional from transformers import AutoTokenizer from transformers.models.gpt2 import GPT2TokenizerFast from text_generation_server.models import FlashCausalLM from text_generation_server.models.custom_modeling.flash_dbrx_modeling import ( FlashDbrxForCausalLM, DbrxConfig, ) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) tracer = trace.get_tracer(__name__) class FlashDbrx(FlashCausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = torch.bfloat16 if dtype is None else dtype else: raise NotImplementedError("FlashDBRX is only available on GPU") try: tokenizer = GPT2TokenizerFast.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, use_fast=True, from_slow=False, ) except: try: tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, use_fast=True, from_slow=False, ) except: # FIXME: change back to model id once the tokenizer.json is merged tokenizer = GPT2TokenizerFast.from_pretrained( "Xenova/dbrx-instruct-tokenizer", revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, use_fast=True, from_slow=False, ) config = DbrxConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) config.quantize = quantize config.use_medusa = use_medusa torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) if config.quantize in ["gptq", "awq"]: weights._set_gptq_params(model_id, revision) model = FlashDbrxForCausalLM(config, weights) torch.distributed.barrier(group=self.process_group) super(FlashDbrx, self).__init__( model=model, tokenizer=tokenizer, num_layers=len(model.model.layers), num_kv_heads=model.model.num_key_value_heads, head_size=model.model.head_size, dtype=dtype, device=device, rank=rank, world_size=world_size, )
text-generation-inference/server/text_generation_server/models/flash_dbrx.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/flash_dbrx.py", "repo_id": "text-generation-inference", "token_count": 1706 }
219
import torch import torch.distributed from transformers import AutoTokenizer, PreTrainedTokenizerBase from typing import Optional import os from text_generation_server.models.custom_modeling.mamba_modeling import ( MambaConfig, ) from loguru import logger from text_generation_server.pb import generate_pb2 from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) from text_generation_server.models.globals import CUDA_GRAPHS, MEM_POOL import time from text_generation_server.models.custom_modeling.mamba_modeling import ( MambaModel, InferenceParams, ) from text_generation_server.models import Model from typing import Any, List, Optional, Tuple, Type, Dict from text_generation_server.models.types import ( Batch, Tokens, Generation, GeneratedText, ) from text_generation_server.utils.tokens import batch_top_tokens, Sampling from dataclasses import dataclass from text_generation_server.utils import NextTokenChooser, StoppingCriteria, Sampling def new_inference_params( n_blocks: int, batch_size: int, d_inner: int, d_conv: int, d_state: int, seqlen_offset: int, dtype: torch.dtype, device: torch.device, ): max_seqlen = 0 conv_states = torch.zeros( ( n_blocks, batch_size, d_inner, d_conv, ), device=device, dtype=dtype, ) ssm_states = torch.zeros( ( n_blocks, batch_size, d_inner, d_state, ), device=device, dtype=dtype, ) inference_params = InferenceParams( max_seqlen=max_seqlen, max_batch_size=batch_size, seqlen_offset=seqlen_offset, conv_states=conv_states, ssm_states=ssm_states, ) return inference_params @dataclass class MambaBatch(Batch): batch_id: int requests: List[generate_pb2.Request] requests_idx_mapping: Dict[int, int] # Decoder values input_ids: torch.Tensor # All tokens all_input_ids: List[torch.Tensor] # Lengths of all generations present in the batch input_lengths: List[int] prefix_offsets: List[int] read_offsets: List[int] # Generation helpers next_token_choosers: List[NextTokenChooser] stopping_criterias: List[StoppingCriteria] top_n_tokens: List[int] top_n_tokens_tensor: torch.Tensor # Metadata used for padding max_input_length: int padding_right_offset: int # Maximum number of tokens this batch will grow to max_tokens: int # Past metadata keys_head_dim_last: bool = True # Inference params inference_params: Optional[Dict[str, Any]] = None def to_pb(self) -> generate_pb2.CachedBatch: return generate_pb2.CachedBatch( id=self.batch_id, request_ids=[r.id for r in self.requests], size=len(self), max_tokens=self.max_tokens, ) @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "MambaBatch": inputs = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] prefix_offsets = [] read_offsets = [] requests_idx_mapping = {} # Parse batch max_truncation = 0 padding_right_offset = 0 max_decode_tokens = 0 for i, r in enumerate(pb.requests): requests_idx_mapping[r.id] = i inputs.append(r.inputs) next_token_choosers.append( NextTokenChooser.from_pb(r.parameters, device, tokenizer) ) stopping_criteria = StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) max_truncation = max(max_truncation, r.truncate) max_decode_tokens += stopping_criteria.max_new_tokens padding_right_offset = max( padding_right_offset, stopping_criteria.max_new_tokens ) tokenized_inputs = tokenizer( inputs, return_tensors="pt", padding=True, return_token_type_ids=False, truncation=True, 
max_length=max_truncation, ).to(device) for _ in pb.requests: input_len = tokenized_inputs["input_ids"].shape[1] prefix_offsets.append(input_len - 5) read_offsets.append(input_len) input_lengths = tokenized_inputs["attention_mask"].sum(1) max_input_length = input_lengths.max() input_ids = tokenized_inputs["input_ids"] all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1) top_n_tokens_tensor = torch.tensor( top_n_tokens, device=device, dtype=torch.int64 ) max_tokens = len(inputs) * (max_input_length + max_decode_tokens) return cls( batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, # past_input_ids=None, all_input_ids=list(all_input_ids), input_lengths=input_lengths.tolist(), prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length.item(), padding_right_offset=padding_right_offset, max_tokens=max_tokens, ) def filter(self, request_ids: List[int]) -> Optional["MambaBatch"]: if len(request_ids) == 0: raise ValueError("Batch must have at least one request") if len(request_ids) == len(self): return self keep_indices = [] # New values after filtering requests_idx_mapping = {} requests = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] max_input_length = 0 next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] total_remaining_decode_tokens = 0 new_padding_right_offset = 0 indices = [] for i, request_id in enumerate(request_ids): idx = self.requests_idx_mapping[request_id] requests_idx_mapping[request_id] = i keep_indices.append(idx) requests.append(self.requests[idx]) prefix_offsets.append(self.prefix_offsets[idx]) read_offsets.append(self.read_offsets[idx]) all_input_ids.append(self.all_input_ids[idx]) request_input_length = self.input_lengths[idx] input_lengths.append(request_input_length) max_input_length = max(max_input_length, request_input_length) indices.append(idx) next_token_choosers.append(self.next_token_choosers[idx]) stopping_criteria = self.stopping_criterias[idx] stopping_criterias.append(stopping_criteria) top_n_tokens.append(self.top_n_tokens[idx]) remaining_decode_tokens = ( stopping_criteria.max_new_tokens - stopping_criteria.current_tokens ) total_remaining_decode_tokens += remaining_decode_tokens new_padding_right_offset = max( new_padding_right_offset, remaining_decode_tokens ) # Apply indices to input_ids, attention mask, past key values and other items that need to be cached input_ids = self.input_ids[keep_indices] top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices] max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens self.requests = requests self.requests_idx_mapping = requests_idx_mapping self.input_ids = input_ids self.all_input_ids = all_input_ids self.input_lengths = input_lengths self.prefix_offsets = prefix_offsets self.read_offsets = read_offsets self.next_token_choosers = next_token_choosers self.stopping_criterias = stopping_criterias self.top_n_tokens = top_n_tokens self.top_n_tokens_tensor = top_n_tokens_tensor self.max_input_length = max_input_length self.padding_right_offset = new_padding_right_offset self.max_tokens = max_tokens # TODO # Kept it simple by just updating the state, maybe updating the other CPU values is necessary. 
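        # Keep only the cached states of the surviving requests; dim 1 of both
        # conv_states and ssm_states is the batch dimension.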
self.inference_params.conv_states = self.inference_params.conv_states[ :, indices ] self.inference_params.ssm_states = self.inference_params.ssm_states[:, indices] return self @classmethod def concatenate(cls, batches: List["MambaBatch"]) -> "MambaBatch": # Used for padding total_batch_size = 0 max_input_length = 0 padding_right_offset = 0 for batch in batches: total_batch_size += len(batch) max_input_length = max(max_input_length, batch.max_input_length) padding_right_offset = max(padding_right_offset, batch.padding_right_offset) # Batch attributes requests = [] requests_idx_mapping = {} input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] next_token_choosers = [] stopping_criterias = [] top_n_tokens = [] max_tokens = 0 max_seqlen = 0 seqlen_offset = 0 (n_blocks, _, d_inner, d_conv) = batches[0].inference_params.conv_states.shape (_, _, _, d_state) = batches[0].inference_params.ssm_states.shape dtype = batches[0].inference_params.conv_states.dtype device = batches[0].inference_params.conv_states.device inference_params = new_inference_params( n_blocks=n_blocks, batch_size=total_batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=device, dtype=dtype, ) # Batch tensors input_ids = None top_n_tokens_tensor = None # Used for slicing correctly inside the tensors # Equivalent to a cumsum on batch sizes start_index = 0 for i, batch in enumerate(batches): requests.extend(batch.requests) input_lengths.extend(batch.input_lengths) prefix_offsets.extend(batch.prefix_offsets) read_offsets.extend(batch.read_offsets) all_input_ids.extend(batch.all_input_ids) next_token_choosers.extend(batch.next_token_choosers) stopping_criterias.extend(batch.stopping_criterias) top_n_tokens.extend(batch.top_n_tokens) if i == 0: requests_idx_mapping = batch.requests_idx_mapping else: # We need to offset the mapping for each batch by the cumulative batch size for k, v in batch.requests_idx_mapping.items(): requests_idx_mapping[k] = v + start_index # Slicing end index for this batch end_index = start_index + len(batch) # Create empty tensor # input_ids is always of shape [batch_size, 1] # We do not need to pad it if input_ids is None: input_ids = batch.input_ids.new_empty((total_batch_size, 1)) # Copy to correct indices input_ids[start_index:end_index] = batch.input_ids if top_n_tokens_tensor is None: top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros( total_batch_size, ) top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor # Add eventual padding tokens that were added while concatenating max_tokens += batch.max_tokens + ( max_input_length - batch.max_input_length ) * len(batch) inference_params.max_seqlen = max( inference_params.max_seqlen, batch.inference_params.max_seqlen ) assert batch.inference_params.seqlen_offset != 0, "Invalid seqlen offset" inference_params.seqlen_offset = max( inference_params.seqlen_offset, batch.inference_params.seqlen_offset ) inference_params.conv_states[:, start_index:end_index] = ( batch.inference_params.conv_states ) inference_params.ssm_states[:, start_index:end_index] = ( batch.inference_params.ssm_states ) start_index = end_index return cls( batch_id=batches[0].batch_id, requests=requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, all_input_ids=all_input_ids, input_lengths=input_lengths, prefix_offsets=prefix_offsets, read_offsets=read_offsets, next_token_choosers=next_token_choosers, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, 
top_n_tokens_tensor=top_n_tokens_tensor, max_input_length=max_input_length, padding_right_offset=padding_right_offset, keys_head_dim_last=batches[0].keys_head_dim_last, max_tokens=max_tokens, inference_params=inference_params, ) def __len__(self): return len(self.requests) class Mamba(Model): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, _rank, world_size = initialize_torch_distributed() if world_size > 1: raise RuntimeError("Mamba does not support Tensor Parallelism (TP)") self.cuda_graphs = {} if torch.cuda.is_available(): device = torch.device("cuda") # Bf16 is important. In f16 accumulations in the matmul are causing # differences while the server is under load. # This is detectable by the integration load test dtype = torch.bfloat16 if dtype is None else dtype else: if quantize: raise ValueError("quantization is not available on CPU") device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/gpt-neox-20b", revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) config = MambaConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) tokenizer.bos_token_id = config.bos_token_id tokenizer.eos_token_id = config.eos_token_id tokenizer.pad_token = tokenizer.eos_token config.quantize = quantize config.use_medusa = use_medusa torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) model = MambaModel(config, weights) torch.distributed.barrier(group=self.process_group) super(Mamba, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, ) @property def batch_type(self) -> Type[MambaBatch]: return MambaBatch def warmup(self, batch) -> Optional[int]: # TODO: implement warmup for Mamba if needed if CUDA_GRAPHS: if self.speculate is None or self.speculate == 0: try: logger.info(f"Cuda Graphs are enabled for sizes {CUDA_GRAPHS}") # Warmup cuda graphs for bs in CUDA_GRAPHS: self.cuda_graph_warmup(bs) except Exception: logger.exception(f"Decode cuda graph warmup failed") return None def cuda_graph_warmup(self, batch_size: int): input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device) n_blocks = len(self.model.blocks) d_state = self.model.config.d_state d_conv = self.model.config.d_conv # Inner takes the expand multiplication d_inner = self.model.config.d_inner # Important seqlen_offset to go through the update mecanism with the state seqlen_offset = 1 inference_params = new_inference_params( n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype, ) graph = torch.cuda.CUDAGraph() torch.cuda.synchronize() # Run once outside to warmup self.model.forward(input_ids=input_ids, inference_params=inference_params) torch.cuda.synchronize() with torch.cuda.graph(graph, pool=MEM_POOL): logits, speculative_logits = self.model.forward( input_ids=input_ids, inference_params=inference_params ) torch.cuda.synchronize() graph_dict = { "input_ids": input_ids, "inference_params": inference_params, "graph": graph, "logits": logits, "speculative_logits": speculative_logits, } 
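        # Cache the captured graph together with its static input/output buffers,
        # keyed by batch size, so forward() can replay it after copying real data
        # into the static buffers.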
self.cuda_graphs[batch_size] = graph_dict def forward( self, input_ids: torch.Tensor, inference_params: Any ) -> Tuple[torch.Tensor, torch.Tensor]: bs = input_ids.shape[0] padded_bs = bs if bs == 3: padded_bs = 4 elif 3 < bs <= 8: padded_bs = 8 elif bs > 8: padded_bs = (bs + 7) // 8 * 8 # Try to find an associated cuda graph cuda_graph = self.cuda_graphs.get(padded_bs, None) is_prefill = inference_params is None or inference_params.seqlen_offset == 0 if is_prefill or cuda_graph is None: return self.model( input_ids, inference_params=inference_params, ) # Copy inputs to the static inputs of the cuda graph # Static inputs are potentially padded cuda_graph["input_ids"][:bs] = input_ids cuda_graph["inference_params"].conv_states[ :, :bs ] = inference_params.conv_states cuda_graph["inference_params"].ssm_states[:, :bs] = inference_params.ssm_states # Replay the graph cuda_graph["graph"].replay() inference_params.conv_states.copy_( cuda_graph["inference_params"].conv_states[:, :bs] ) inference_params.ssm_states.copy_( cuda_graph["inference_params"].ssm_states[:, :bs] ) # Slice output to the correct shape speculative_logits = ( cuda_graph["speculative_logits"][:bs] if cuda_graph["speculative_logits"] is not None else None ) logits = cuda_graph["logits"][:bs] return logits, speculative_logits def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]: start = time.time_ns() input_ids = ( batch.input_ids ) # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids batch_size, max_seqlen = input_ids.shape # Inference params if batch.inference_params is None: # 0 is important here seqlen_offset = 0 n_blocks = len(self.model.blocks) d_state = self.model.config.d_state d_conv = self.model.config.d_conv d_inner = self.model.config.d_inner inference_params = new_inference_params( n_blocks=n_blocks, batch_size=batch_size, d_state=d_state, d_conv=d_conv, d_inner=d_inner, seqlen_offset=seqlen_offset, device=self.device, dtype=self.dtype, ) batch.inference_params = inference_params # Forward pass logits, speculative_logits = self.forward( input_ids, inference_params=batch.inference_params ) # batch.inference_params = new_inference_params # Results generations: List[Generation] = [] stopped = True # Speculation is not active for causal accepted_ids = torch.ones_like(batch.input_ids)[:, 0] batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens( batch.top_n_tokens, batch.top_n_tokens_tensor, torch.log_softmax(logits[:, -1], -1), accepted_ids, ) start_decode = time.time_ns() # Zipped iterator iterator = zip( batch.requests, batch.input_lengths, batch.prefix_offsets, batch.read_offsets, logits, batch.next_token_choosers, batch.stopping_criterias, batch.all_input_ids, batch.top_n_tokens, batch_top_token_ids, batch_top_token_logprobs, ) # For each member of the batch for i, ( request, input_length, prefix_offset, read_offset, logits, next_token_chooser, stopping_criteria, all_input_ids, top_n_tokens, top_token_ids, top_token_logprobs, ) in enumerate(iterator): # Select next token next_token_id, logprobs = next_token_chooser( all_input_ids.view(1, -1), logits[-1:, :] ) # Append next token to all tokens all_input_ids = torch.cat([all_input_ids, next_token_id]) new_input_length = input_length + 1 # Generated token next_token_logprob = logprobs[-1, next_token_id] next_token_id_squeezed = next_token_id.squeeze() next_token_text, prefix_offset, read_offset = self.decode_token( all_input_ids[:, 0], prefix_offset, read_offset ) # Evaluate stopping criteria stop, 
reason = stopping_criteria( next_token_id_squeezed, next_token_text, ) if not stop: stopped = False # Shard generations # All generations will be appended in the rust sharded client if i % self.world_size == self.rank: if stop: # Decode generated tokens output_text, _, _ = self.decode_token( all_input_ids[:, 0], prefix_offset=len(all_input_ids) - stopping_criteria.current_tokens - 1, read_offset=len(all_input_ids) - stopping_criteria.current_tokens, skip_special_tokens=True, ) # Get seed if isinstance(next_token_chooser.choice, Sampling): seed = next_token_chooser.choice.seed else: seed = None generated_text = GeneratedText( output_text, stopping_criteria.current_tokens, reason, seed ) else: generated_text = None if stopping_criteria.current_tokens == 1 and request.prefill_logprobs: # Remove generated token to only have prefill and add nan for first prompt token prefill_logprobs = [float("nan")] + torch.log_softmax( logits, -1 ).gather(1, all_input_ids[1:]).squeeze(1)[ -new_input_length:-1 ].tolist() prefill_token_ids = all_input_ids[-new_input_length:-1] prefill_texts = self.tokenizer.batch_decode( prefill_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) prefill_tokens = Tokens( prefill_token_ids, prefill_logprobs, prefill_texts, is_special=[], ) else: prefill_tokens = None if top_n_tokens > 0: toptoken_texts = self.tokenizer.batch_decode( top_token_ids, clean_up_tokenization_spaces=False, skip_special_tokens=False, ) special_toptokens = [ token_id in self.all_special_ids for token_id in top_token_ids ] top_tokens = Tokens( top_token_ids, top_token_logprobs, toptoken_texts, special_toptokens, ) else: top_tokens = None generation = Generation( request.id, prefill_tokens, Tokens( [next_token_id_squeezed], [next_token_logprob], [next_token_text], [next_token_id_squeezed.item() in self.all_special_ids], ), generated_text, top_tokens, ) generations.append(generation) # Update values batch.next_token_choosers[i] = batch.next_token_choosers[ i ].advance_grammar(next_token_id_squeezed.item()) batch.input_ids[i, 0] = next_token_id batch.all_input_ids[i] = all_input_ids batch.input_lengths[i] = new_input_length batch.prefix_offsets[i] = prefix_offset batch.read_offsets[i] = read_offset batch.max_input_length = max(batch.max_input_length, new_input_length) # We finished all generations in the batch; there is no next batch if stopped: forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, None, (forward_ns, decode_ns) # Slice unused values from prefill batch.input_ids = batch.input_ids[:, :1] forward_ns = start_decode - start decode_ns = time.time_ns() - start_decode return generations, batch, (forward_ns, decode_ns)
text-generation-inference/server/text_generation_server/models/mamba.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/mamba.py", "repo_id": "text-generation-inference", "token_count": 14221 }
220
import datetime import torch import os from loguru import logger from pathlib import Path from safetensors.torch import save_file, load_file, _find_shared_tensors, _is_complete from typing import List, Dict from collections import defaultdict def _remove_duplicate_names( state_dict: Dict[str, torch.Tensor], *, preferred_names: List[str] = None, discard_names: List[str] = None, ) -> Dict[str, List[str]]: if preferred_names is None: preferred_names = [] preferred_names = set(preferred_names) if discard_names is None: discard_names = [] discard_names = set(discard_names) shareds = _find_shared_tensors(state_dict) to_remove = defaultdict(list) for shared in shareds: complete_names = set( [name for name in shared if _is_complete(state_dict[name])] ) if not complete_names: if len(shared) == 1: # Force contiguous name = list(shared)[0] state_dict[name] = state_dict[name].clone() complete_names = {name} else: raise RuntimeError( f"Error while trying to find names to remove to save state dict, but found no suitable name to keep for saving amongst: {shared}. None is covering the entire storage.Refusing to save/load the model since you could be storing much more memory than needed. Please refer to https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an issue." ) keep_name = sorted(list(complete_names))[0] # Mecanism to preferentially select keys to keep # coming from the on-disk file to allow # loading models saved with a different choice # of keep_name preferred = complete_names.difference(discard_names) if preferred: keep_name = sorted(list(preferred))[0] if preferred_names: preferred = preferred_names.intersection(complete_names) if preferred: keep_name = sorted(list(preferred))[0] for name in sorted(shared): if name != keep_name: to_remove[keep_name].append(name) return to_remove def convert_file(pt_file: Path, sf_file: Path, discard_names: List[str]): """ Convert a pytorch file to a safetensors file This will remove duplicate tensors from the file. Unfortunately, this might not respect *transformers* convention. Forcing us to check for potentially different keys during load when looking for specific tensors (making tensor sharing explicit). 
""" loaded = torch.load(pt_file, map_location="cpu", weights_only=True) if "state_dict" in loaded: loaded = loaded["state_dict"] to_removes = _remove_duplicate_names(loaded, discard_names=discard_names) metadata = {"format": "pt"} for kept_name, to_remove_group in to_removes.items(): for to_remove in to_remove_group: if to_remove not in metadata: metadata[to_remove] = kept_name del loaded[to_remove] # Force tensors to be contiguous loaded = {k: v.contiguous() for k, v in loaded.items()} dirname = os.path.dirname(sf_file) os.makedirs(dirname, exist_ok=True) save_file(loaded, sf_file, metadata=metadata) reloaded = load_file(sf_file) for k in loaded: pt_tensor = loaded[k] sf_tensor = reloaded[k] if not torch.equal(pt_tensor, sf_tensor): raise RuntimeError(f"The output tensors do not match for key {k}") def convert_files(pt_files: List[Path], sf_files: List[Path], discard_names: List[str]): assert len(pt_files) == len(sf_files) N = len(pt_files) # We do this instead of using tqdm because we want to parse the logs with the launcher for i, (pt_file, sf_file) in enumerate(zip(pt_files, sf_files)): # Skip blacklisted files if ( "arguments" in pt_file.name or "args" in pt_file.name or "training" in pt_file.name ): continue start = datetime.datetime.now() convert_file(pt_file, sf_file, discard_names) elapsed = datetime.datetime.now() - start logger.info(f"Convert: [{i + 1}/{N}] -- Took: {elapsed}")
text-generation-inference/server/text_generation_server/utils/convert.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/convert.py", "repo_id": "text-generation-inference", "token_count": 1775 }
221
import re from typing import List, Optional, Tuple import math import torch from text_generation_server.pb import generate_pb2 from text_generation_server.pb.generate_pb2 import FinishReason, GrammarType from text_generation_server.utils.logits_process import ( FrequencyPenaltyLogitsProcessor, GrammarLogitProcessor, HeterogeneousProcessorWrapper, HeterogeneousRepetitionPenaltyLogitsProcessor, HeterogeneousFrequencyPenaltyLogitsProcessor, HeterogeneousTemperatureLogitsWarper, HeterogeneousTopKLogitsWarper, HeterogeneousTopPLogitsWarper, HeterogeneousTypicalLogitsWarper, HeterogeneousGrammarLogitProcessor, static_warper, ) from text_generation_server.utils.watermark import WatermarkLogitsProcessor from transformers import PreTrainedTokenizerBase, RepetitionPenaltyLogitsProcessor class NextTokenChooser: def __init__( self, watermark: bool = False, temperature: float = 1.0, repetition_penalty: float = 1.0, frequency_penalty: float = 0.0, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, do_sample: bool = False, seed: int = 0, device: str = "cpu", tokenizer: Optional[PreTrainedTokenizerBase] = None, grammar: str = "", grammar_type: GrammarType = GrammarType.GRAMMAR_TYPE_NONE, fsm_grammar_state: int = 0, ): self.watermark_processor = ( WatermarkLogitsProcessor(device=device) if watermark else None ) self.repetition_processor = ( RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty) if repetition_penalty and repetition_penalty != 1.0 else None ) self.frequency_processor = ( FrequencyPenaltyLogitsProcessor(penalty=frequency_penalty) if frequency_penalty and frequency_penalty != 0.0 else None ) self.grammar_processor = ( GrammarLogitProcessor(tokenizer, device, grammar, grammar_type) if grammar != "" else None ) self.tokenizer = tokenizer has_warpers = ( (temperature is not None and temperature != 1.0) or (top_k is not None and top_k != 0) or (top_p is not None and top_p < 1.0) or (typical_p is not None and typical_p < 1.0) ) if has_warpers: self.static_warper = static_warper( temperature=temperature, top_k=top_k, top_p=top_p, typical_p=typical_p ) else: self.static_warper = None sampling = do_sample or has_warpers self.choice = Sampling(seed, device) if sampling else Greedy() self.fsm_grammar_state = fsm_grammar_state self.grammar = grammar def __call__(self, input_ids, scores): if self.watermark_processor is not None: scores = self.watermark_processor(input_ids, scores) if self.repetition_processor is not None: scores = self.repetition_processor(input_ids, scores) if self.frequency_processor is not None: scores = self.frequency_processor(input_ids, scores) if self.grammar_processor is not None: scores = self.grammar_processor(scores, self.fsm_grammar_state) if self.static_warper is None: next_logprob = torch.log_softmax(scores, -1) else: scores, next_logprob = self.static_warper(scores) next_id = self.choice(scores[-1]).view(1, 1) return next_id, next_logprob def advance_grammar(self, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_state = self.grammar_processor.advance( next_id, self.fsm_grammar_state ) return self @classmethod def from_pb( cls, pb: generate_pb2.NextTokenChooserParameters, device: torch.device, tokenizer: PreTrainedTokenizerBase, ) -> "NextTokenChooser": return NextTokenChooser( watermark=pb.watermark, temperature=pb.temperature, repetition_penalty=pb.repetition_penalty, frequency_penalty=pb.frequency_penalty, top_k=pb.top_k, top_p=pb.top_p, typical_p=pb.typical_p, do_sample=pb.do_sample, seed=pb.seed, 
device=device, tokenizer=tokenizer, grammar=pb.grammar, grammar_type=pb.grammar_type, ) class StopSequenceCriteria: def __init__(self, stop_sequence: str): stop_sequence = re.escape(stop_sequence) self.regex = re.compile(f"{stop_sequence}$") def __call__(self, output: str) -> bool: if self.regex.findall(output): return True return False class StoppingCriteria: def __init__( self, eos_token_id: int, stop_sequence_criterias: List[StopSequenceCriteria], max_new_tokens: int = 20, ignore_eos_token: bool = False, ): self.eos_token_id = eos_token_id self.stop_sequence_criterias = stop_sequence_criterias self.max_new_tokens = max_new_tokens self.current_tokens = 0 self.current_output = "" self.ignore_eos_token = ignore_eos_token def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]: self.current_tokens += 1 if self.current_tokens >= self.max_new_tokens: return True, FinishReason.FINISH_REASON_LENGTH if not self.ignore_eos_token and last_token == self.eos_token_id: return True, FinishReason.FINISH_REASON_EOS_TOKEN if self.stop_sequence_criterias: self.current_output += last_output # There is no need to keep an output that is too long if len(self.current_output) > 300: # Slice to -200 to avoid doing it all the time self.current_output = self.current_output[-200:] for stop_sequence_criteria in self.stop_sequence_criterias: if stop_sequence_criteria(self.current_output): return True, FinishReason.FINISH_REASON_STOP_SEQUENCE return False, None @classmethod def from_pb( cls, pb: generate_pb2.StoppingCriteriaParameters, tokenizer: PreTrainedTokenizerBase, ) -> "StoppingCriteria": stop_sequence_criterias = [ StopSequenceCriteria(sequence) for sequence in pb.stop_sequences ] return StoppingCriteria( tokenizer.eos_token_id, stop_sequence_criterias, pb.max_new_tokens, pb.ignore_eos_token, ) def create_n_gram_speculation( input_ids: torch.Tensor, next_ids: torch.Tensor, accepted_ids: torch.Tensor, speculate: int, verbose: bool, ): # Very trivial approach, find first match in the string. # This is much less refined than actual n-gram but seems to work # relatively OK in grounded mode and is by far much faster with # much less worst case complexity as everything happens on device. 
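    # Hypothetical illustration: with an input_ids row of [7, 3, 9, 3, 2] and a last
    # accepted next id of 3, the first match of 3 is at position 1, so the guess is
    # simply the `speculate` prompt tokens that follow it ([9, 3, ...]).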
B = accepted_ids.shape[0] device = input_ids.device seeds = next_ids[accepted_ids.cumsum(dim=-1) - 1] indices = (input_ids == seeds.unsqueeze(-1)).max(dim=1).indices + 1 all_indices = indices.unsqueeze(-1).expand(B, speculate) + torch.arange( speculate, device=device ) all_indices = torch.clamp(all_indices, max=input_ids.shape[1] - 1) speculative_ids = input_ids.gather(dim=-1, index=all_indices) return speculative_ids class HeterogeneousNextTokenChooser: def __init__( self, dtype: torch.dtype, device: torch.device, watermark: List[bool], temperature: List[float], repetition_penalty: List[float], frequency_penalty: List[float], top_k: List[int], top_p: List[float], typical_p: List[float], do_sample: List[bool], seeds: List[int], tokenizer: PreTrainedTokenizerBase, grammars: List[str], grammar_types: List[int], fsm_grammar_states=List[int], ): warpers = [] self.watermark_processor = ( HeterogeneousProcessorWrapper( { i: WatermarkLogitsProcessor(device=device) for i, do_watermark in enumerate(watermark) if do_watermark } ) if any(watermark) else None ) self.repetition_processor = ( HeterogeneousRepetitionPenaltyLogitsProcessor( repetition_penalty, dtype, device ) if any([x != 1.0 for x in repetition_penalty]) else None ) self.frequency_processor = ( HeterogeneousFrequencyPenaltyLogitsProcessor( frequency_penalty, dtype, device ) if any([x != 0.0 for x in frequency_penalty]) else None ) self.grammar_processor = ( HeterogeneousGrammarLogitProcessor( tokenizer, device, grammars, grammar_types ) if any([grammar != "" for grammar in grammars]) else None ) if any([x != 1.0 for x in temperature]): do_sample = [ sample or x != 1.0 for x, sample in zip(temperature, do_sample) ] warpers.append( HeterogeneousTemperatureLogitsWarper(temperature, dtype, device) ) if any([x != 0 for x in top_k]): do_sample = [sample or x != 0 for x, sample in zip(top_k, do_sample)] warpers.append(HeterogeneousTopKLogitsWarper(top_k, device)) if any([x < 1.0 for x in top_p]): do_sample = [sample or x < 1.0 for x, sample in zip(top_p, do_sample)] warpers.append(HeterogeneousTopPLogitsWarper(top_p, dtype, device)) if any([x < 1.0 for x in typical_p]): do_sample = [sample or x < 1.0 for x, sample in zip(typical_p, do_sample)] warpers.append(HeterogeneousTypicalLogitsWarper(typical_p, dtype, device)) self.warpers = warpers if any(do_sample): self.choice = HeterogeneousSampling(do_sample, seeds, device) else: self.choice = Greedy() self.seeds = seeds self.do_sample = do_sample self.dtype = dtype self.device = device self.tokenizer = tokenizer self.fsm_grammar_states = fsm_grammar_states self.grammars = grammars self.grammar_types = grammar_types def __call__( self, input_ids: torch.Tensor, scores: torch.Tensor, speculate: int, speculated_ids: Optional[torch.Tensor] = None, speculative_scores: Optional[torch.Tensor] = None, verbose=False, ): if speculated_ids is not None: B = scores.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 scores = scores.view(B, S, -1) else: B = scores.shape[0] S = 1 scores = scores.view(B, S, -1) next_ids = torch.zeros((B, S), device=scores.device, dtype=torch.long) for j in range(S): _scores = scores[:, j] if self.watermark_processor is not None: _scores = self.watermark_processor(input_ids, _scores) if self.repetition_processor is not None: _scores = self.repetition_processor(input_ids, _scores) if self.frequency_processor is not None: _scores = self.frequency_processor(input_ids, _scores) if self.grammar_processor is not None: _scores = self.grammar_processor(_scores, 
self.fsm_grammar_states) for warper in self.warpers: _scores = warper(input_ids, _scores) _next_ids = self.choice(_scores) scores[:, j] = _scores next_ids[:, j] = _next_ids next_ids = next_ids.view(B * S) allscores = scores.view(B * S, -1) alllogprobs = torch.log_softmax(allscores, -1) if speculated_ids is not None: accepted_ids = [] B = next_ids.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 indices = [] for i in range(B): _next_ids = next_ids[i * S : (i + 1) * S] _speculated_ids = speculated_ids[i] validate_speculative = _next_ids[:-1] == _speculated_ids index = i * S accepted = 1 # First is always valid indices.append(index) for valid in validate_speculative.tolist(): if valid: index += 1 accepted += 1 indices.append(index) else: break accepted_ids.append(accepted) accepted_ids = torch.tensor( accepted_ids, device=input_ids.device, dtype=input_ids.dtype ) next_ids = next_ids[indices] logprobs = alllogprobs[indices] indices = torch.arange(B, device=input_ids.device) * S if speculative_scores is not None: speculative_scores = speculative_scores[indices + accepted_ids - 1] else: accepted_ids = torch.ones_like(next_ids) logprobs = alllogprobs next_logprobs = torch.gather(logprobs, 1, next_ids.view(-1, 1)).view(-1) if speculate > 0: if speculative_scores is not None: # Medusa provided some scores speculative_ids = Greedy()(speculative_scores) else: # n-gram speculative_ids = create_n_gram_speculation( input_ids, next_ids, accepted_ids, speculate, verbose ) else: speculative_ids = None return next_ids, next_logprobs, alllogprobs, accepted_ids, speculative_ids def advance_grammar(self, next_ids: List[int]): if self.grammar_processor is not None: other_new_states = self.grammar_processor.advance_batch( next_ids, self.fsm_grammar_states ) self.fsm_grammar_states = other_new_states return self def advance_grammar_single(self, grammar_state_index: int, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_states[grammar_state_index] = ( self.grammar_processor.advance_at_index( next_id, self.fsm_grammar_states[grammar_state_index], grammar_state_index, ) ) return self def filter(self, indices): if self.watermark_processor is not None: self.watermark_processor = self.watermark_processor.filter(indices) if self.repetition_processor is not None: self.repetition_processor = self.repetition_processor.filter(indices) if self.frequency_processor is not None: self.frequency_processor = self.frequency_processor.filter(indices) if self.grammar_processor is not None: self.grammar_processor = self.grammar_processor.filter(indices) filtered_warpers = [] for warper in self.warpers: filtered_warper = warper.filter(indices) if filtered_warper is not None: filtered_warpers.append(filtered_warper) self.warpers = filtered_warpers self.seeds = [self.seeds[i] for i in indices] self.do_sample = [self.do_sample[i] for i in indices] new_grammars = [] new_fsm_grammar_states = [] new_grammar_types = [] for i in indices: new_grammars.append(self.grammars[i]) new_fsm_grammar_states.append(self.fsm_grammar_states[i]) new_grammar_types.append(self.grammar_types[i]) self.grammars = new_grammars self.fsm_grammar_states = new_fsm_grammar_states self.grammar_types = new_grammar_types if any(self.do_sample): self.choice.filter(indices) else: self.choice = Greedy() return self @classmethod def from_pb( cls, pb: List[generate_pb2.NextTokenChooserParameters], dtype: torch.dtype, device: torch.device, tokenizer: PreTrainedTokenizerBase, fsm_grammar_states: Optional[List[int]] = None, ) -> 
"HeterogeneousNextTokenChooser": return HeterogeneousNextTokenChooser( watermark=[pb_.watermark for pb_ in pb], temperature=[pb_.temperature for pb_ in pb], repetition_penalty=[pb_.repetition_penalty for pb_ in pb], frequency_penalty=[pb_.frequency_penalty for pb_ in pb], top_k=[pb_.top_k for pb_ in pb], top_p=[pb_.top_p for pb_ in pb], typical_p=[pb_.typical_p for pb_ in pb], do_sample=[pb_.do_sample for pb_ in pb], seeds=[pb_.seed for pb_ in pb], device=device, dtype=dtype, tokenizer=tokenizer, grammars=[pb_.grammar for pb_ in pb], grammar_types=[pb_.grammar_type for pb_ in pb], fsm_grammar_states=( fsm_grammar_states if fsm_grammar_states else [0] * len(pb) ), ) class Sampling: def __init__(self, seed: int, device: str = "cpu"): self.generator = torch.Generator(device) self.generator.manual_seed(seed) self.seed = seed def __call__(self, logits): probs = torch.nn.functional.softmax(logits, -1) # Avoid GPU<->CPU sync done by torch multinomial # See: https://github.com/pytorch/pytorch/blob/925a3788ec5c06db62ca732a0e9425a26a00916f/aten/src/ATen/native/Distributions.cpp#L631-L637 q = torch.empty_like(probs).exponential_(1, generator=self.generator) return probs.div_(q).argmax() class Greedy: def __call__(self, logits): return logits.argmax(dim=-1) class HeterogeneousSampling: r""" Mixed greedy and probabilistic sampling. Compute both and pick the right one for each sample. """ def __init__(self, do_sample: List[bool], seeds: List[int], device: torch.device): self.seeds = seeds self.greedy_indices = [] self.sampling_mapping = {} for i, (sample, seed) in enumerate(zip(do_sample, seeds)): if sample: self.sampling_mapping[i] = Sampling(seed, device) else: self.greedy_indices.append(i) self.greedy = Greedy() def __call__(self, logits): out = torch.empty(logits.shape[0], dtype=torch.int64, device=logits.device) if self.greedy_indices: # Computing for all indices is faster than slicing torch.argmax(logits, -1, out=out) for i, sampling in self.sampling_mapping.items(): out[i] = sampling(logits[i]) return out def filter(self, indices): new_greedy_indices = [] new_sampling_mapping = {} for i, idx in enumerate(indices): if idx in self.sampling_mapping: new_sampling_mapping[i] = self.sampling_mapping[idx] else: new_greedy_indices.append(i) self.greedy_indices = new_greedy_indices self.sampling_mapping = new_sampling_mapping return self def batch_top_tokens( top_n_tokens: List[int], top_n_tokens_tensor: torch.Tensor, logprobs: torch.Tensor, accepted_ids: torch.Tensor, ) -> Tuple[List[List[List[int]]], List[List[List[float]]]]: """Find the top n most likely tokens for a batch of generations. When multiple tokens have equal probabilities and they don't all fit, the remaining tokens are also returned. 
""" max_top_n = max(top_n_tokens) # Early exit when top_n_tokens is not used if max_top_n == 0: return [[[]]] * len(top_n_tokens), [[[]]] * len(top_n_tokens) batch_size = accepted_ids.shape[0] speculate_size = logprobs.shape[0] // batch_size top_n_tokens_tensor = top_n_tokens_tensor.repeat_interleave(speculate_size) # Ensure top_n doesn't exceed vocab size top_n_tokens = [ min(tok, logprobs.size(-1)) for tok in top_n_tokens for _ in range(speculate_size) ] # Parallel kthvalue adapted from https://discuss.pytorch.org/t/how-to-efficiently-get-the-k-th-largest-values-in-parallel/160529/2 # Sorted topk is faster than torch.sort() since we only need a small subset sorted_top_k = torch.topk(logprobs, k=max_top_n, dim=-1, sorted=True).values nth_highest = torch.gather( sorted_top_k, 1, (top_n_tokens_tensor - 1).clip(min=0).unsqueeze(1) ) nth_highest[nth_highest == -float("inf")] = torch.finfo(logprobs.dtype).min # Find the new "fuzzy" top n values top_n_indices = (logprobs >= nth_highest).nonzero() _, top_n_ishes = torch.unique_consecutive(top_n_indices[:, 0], return_counts=True) k = 1 if top_n_ishes.numel() == 0 else top_n_ishes.max() # Take a new topk for these new max n values top_k = torch.topk(logprobs, k=k, dim=1, sorted=True) top_n_ishes = top_n_ishes.tolist() top_indices = top_k.indices.tolist() top_values = top_k.values.tolist() batch_top_token_ids = [] batch_top_token_logprobs = [] accepted_ids_list = accepted_ids.tolist() for i, n_accepted_ids in enumerate(accepted_ids_list): start = speculate_size * i stop = speculate_size * (i + 1) _top_indices = top_indices[start:stop] _top_values = top_values[start:stop] _top_n_ishes = top_n_ishes[start:stop] _top_n_tokens = top_n_tokens[start:stop] _top_indices = _top_indices[:n_accepted_ids] _top_values = _top_values[:n_accepted_ids] _top_n_ishes = _top_n_ishes[:n_accepted_ids] _top_n_tokens = _top_n_tokens[:n_accepted_ids] row_top_token_ids = [] row_top_token_logprobs = [] for idxs, vals, n, req_n in zip( _top_indices, _top_values, _top_n_ishes, _top_n_tokens ): indices = idxs[:n] if req_n > 0 else [] values = vals[:n] if req_n > 0 else [] row_top_token_ids.append(indices) row_top_token_logprobs.append(values) batch_top_token_ids.append(row_top_token_ids) batch_top_token_logprobs.append(row_top_token_logprobs) return batch_top_token_ids, batch_top_token_logprobs
text-generation-inference/server/text_generation_server/utils/tokens.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/tokens.py", "repo_id": "text-generation-inference", "token_count": 10984 }
222
.PHONY: style check-style test DATA_DIR = data dir_guard=@mkdir -p $(@D) # Format source code automatically style: npm run lint # Check the source code is formatted correctly check-style: npm run lint-check TESTS_RESOURCES = $(DATA_DIR)/small.txt $(DATA_DIR)/roberta.json $(DATA_DIR)/tokenizer-wiki.json $(DATA_DIR)/bert-wiki.json # Launch the test suite test: $(TESTS_RESOURCES) npm run test $(DATA_DIR)/big.txt : $(dir_guard) wget https://norvig.com/big.txt -O $@ $(DATA_DIR)/small.txt : $(DATA_DIR)/big.txt head -100 $(DATA_DIR)/big.txt > $@ $(DATA_DIR)/roberta.json : $(dir_guard) wget https://huggingface.co/roberta-large/raw/main/tokenizer.json -O $@ $(DATA_DIR)/tokenizer-wiki.json : $(dir_guard) wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-quicktour/tokenizer.json -O $@ $(DATA_DIR)/bert-wiki.json : $(dir_guard) wget https://s3.amazonaws.com/models.huggingface.co/bert/anthony/doc-pipeline/tokenizer.json -O $@
tokenizers/bindings/node/Makefile/0
{ "file_path": "tokenizers/bindings/node/Makefile", "repo_id": "tokenizers", "token_count": 406 }
223
import {
  byteLevelPreTokenizer,
  metaspacePreTokenizer,
  punctuationPreTokenizer,
  sequencePreTokenizer,
  splitPreTokenizer,
  whitespaceSplitPreTokenizer,
} from '../../'

describe('byteLevelPreTokenizer', () => {
  it('instantiates correctly', () => {
    const processor = byteLevelPreTokenizer()
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })
})

describe('metaspacePreTokenizer', () => {
  it('instantiates correctly without any parameter', () => {
    const processor = metaspacePreTokenizer()
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })

  it('accepts `undefined` as first parameter', () => {
    expect(metaspacePreTokenizer(undefined)).toBeDefined()
  })

  it('accepts `undefined` as second parameter', () => {
    expect(metaspacePreTokenizer('t', undefined)).toBeDefined()
  })

  it('can pre-tokenize strings', () => {
    const pretok = metaspacePreTokenizer()
    expect(pretok.preTokenizeString('Hello there friend')).toEqual([
      ['▁Hello', [0, 5]],
      ['▁there', [5, 11]],
      ['▁friend', [11, 18]],
    ])
  })
})

describe('punctuationPreTokenizer', () => {
  it('instantiates correctly without any parameter', () => {
    const processor = punctuationPreTokenizer()
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })

  it('instantiates correctly with non-default split delimiter', () => {
    const processor = punctuationPreTokenizer('removed')
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })
})

describe('splitPreTokenizer', () => {
  it('instantiates correctly with invert parameter', () => {
    const processor = splitPreTokenizer(' ', 'mergedWithPrevious', false)
    expect(processor.constructor.name).toEqual('PreTokenizer')
  })
})

describe('sequencePreTokenizer', () => {
  it('instantiates correctly', () => {
    const punctuation = punctuationPreTokenizer()
    const whitespace = whitespaceSplitPreTokenizer()
    const sequence2 = sequencePreTokenizer([])
    expect(sequence2.constructor.name).toEqual('PreTokenizer')
    const sequence3 = sequencePreTokenizer([punctuation, whitespace])
    expect(sequence3.constructor.name).toEqual('PreTokenizer')
  })
})
tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/pre-tokenizers.test.ts", "repo_id": "tokenizers", "token_count": 728 }
224
{ "name": "tokenizers-linux-arm64-gnu", "version": "0.13.4-rc1", "os": [ "linux" ], "cpu": [ "arm64" ], "main": "tokenizers.linux-arm64-gnu.node", "files": [ "tokenizers.linux-arm64-gnu.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers", "libc": [ "glibc" ] }
tokenizers/bindings/node/npm/linux-arm64-gnu/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm64-gnu/package.json", "repo_id": "tokenizers", "token_count": 289 }
225
use crate::arc_rwlock_serde; use serde::{Deserialize, Serialize}; extern crate tokenizers as tk; use napi::bindgen_prelude::*; use napi_derive::napi; use std::sync::{Arc, RwLock}; use tk::decoders::DecoderWrapper; /// Decoder #[derive(Clone, Serialize, Deserialize)] #[napi] pub struct Decoder { #[serde(flatten, with = "arc_rwlock_serde")] decoder: Option<Arc<RwLock<DecoderWrapper>>>, } #[napi] impl Decoder { #[napi] pub fn decode(&self, tokens: Vec<String>) -> Result<String> { use tk::Decoder; self .decoder .as_ref() .unwrap() .read() .unwrap() .decode(tokens) .map_err(|e| Error::from_reason(format!("{}", e))) } } impl tk::Decoder for Decoder { fn decode_chain(&self, tokens: Vec<String>) -> tk::Result<Vec<String>> { self .decoder .as_ref() .ok_or("Uninitialized Decoder")? .read() .unwrap() .decode_chain(tokens) } } #[napi] pub fn bpe_decoder(suffix: Option<String>) -> Decoder { let suffix = suffix.unwrap_or("</w>".to_string()); let decoder = Some(Arc::new(RwLock::new( tk::decoders::bpe::BPEDecoder::new(suffix).into(), ))); Decoder { decoder } } #[napi] pub fn byte_fallback_decoder() -> Decoder { Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::byte_fallback::ByteFallback::new().into(), ))), } } #[napi] pub fn ctc_decoder( #[napi(ts_arg_type = "string = '<pad>'")] pad_token: Option<String>, word_delimiter_token: Option<String>, cleanup: Option<bool>, ) -> Decoder { let pad_token = pad_token.unwrap_or("<pad>".to_string()); let word_delimiter_token = word_delimiter_token.unwrap_or("|".to_string()); let cleanup = cleanup.unwrap_or(true); let decoder = Some(Arc::new(RwLock::new( tk::decoders::ctc::CTC::new(pad_token, word_delimiter_token, cleanup).into(), ))); Decoder { decoder } } #[napi] pub fn fuse_decoder() -> Decoder { Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::fuse::Fuse::new().into(), ))), } } #[napi] pub fn metaspace_decoder( #[napi(ts_arg_type = "string = '▁'")] replacement: Option<String>, #[napi(ts_arg_type = "prepend_scheme = 'always'")] prepend_scheme: Option<String>, #[napi(ts_arg_type = "split = true")] split: Option<bool>, ) -> Result<Decoder> { use tk::pre_tokenizers::metaspace::PrependScheme; let split = split.unwrap_or(true); let replacement = replacement.unwrap_or("▁".to_string()); if replacement.chars().count() != 1 { return Err(Error::from_reason( "replacement is supposed to be a single char", )); } let replacement = replacement.chars().next().unwrap(); let prepend_scheme: PrependScheme = match prepend_scheme.unwrap_or(String::from("always")).as_str() { "always" => PrependScheme::Always, "first" => PrependScheme::First, "never" => PrependScheme::Never, _ => { return Err(Error::from_reason( "prepend_scheme is supposed to be either 'always', 'first' or 'never'", )); } }; Ok(Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::metaspace::Metaspace::new(replacement, prepend_scheme, split).into(), ))), }) } #[napi] pub fn replace_decoder(pattern: String, content: String) -> Result<Decoder> { Ok(Decoder { decoder: Some(Arc::new(RwLock::new( tk::normalizers::replace::Replace::new(pattern, content) .map_err(|e| Error::from_reason(e.to_string()))? 
.into(), ))), }) } #[napi] pub fn sequence_decoder(decoders: Vec<&Decoder>) -> Decoder { let sequence: Vec<tk::DecoderWrapper> = decoders .into_iter() .filter_map(|decoder| { decoder .decoder .as_ref() .map(|decoder| (**decoder).read().unwrap().clone()) }) .clone() .collect(); Decoder { decoder: Some(Arc::new(RwLock::new(tk::DecoderWrapper::Sequence( tk::decoders::sequence::Sequence::new(sequence), )))), } } #[napi] pub fn strip_decoder(content: String, left: u32, right: u32) -> Result<Decoder> { let content: char = content.chars().next().ok_or(Error::from_reason( "Expected non empty string for strip pattern", ))?; Ok(Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::strip::Strip::new(content, left as usize, right as usize).into(), ))), }) } #[napi] pub fn word_piece_decoder( #[napi(ts_arg_type = "string = '##'")] prefix: Option<String>, #[napi(ts_arg_type = "bool = true")] cleanup: Option<bool>, ) -> Decoder { let prefix = prefix.unwrap_or("##".to_string()); let cleanup = cleanup.unwrap_or(true); Decoder { decoder: Some(Arc::new(RwLock::new( tk::decoders::wordpiece::WordPiece::new(prefix, cleanup).into(), ))), } }
tokenizers/bindings/node/src/decoders.rs/0
{ "file_path": "tokenizers/bindings/node/src/decoders.rs", "repo_id": "tokenizers", "token_count": 2038 }
226
[target.x86_64-apple-darwin] rustflags = [ "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", "-C", "link-arg=-mmacosx-version-min=10.11", ] [target.aarch64-apple-darwin] rustflags = [ "-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup", "-C", "link-arg=-mmacosx-version-min=10.11", ]
tokenizers/bindings/python/.cargo/config.toml/0
{ "file_path": "tokenizers/bindings/python/.cargo/config.toml", "repo_id": "tokenizers", "token_count": 146 }
227
from .. import decoders Decoder = decoders.Decoder ByteLevel = decoders.ByteLevel Replace = decoders.Replace WordPiece = decoders.WordPiece ByteFallback = decoders.ByteFallback Fuse = decoders.Fuse Strip = decoders.Strip Metaspace = decoders.Metaspace BPEDecoder = decoders.BPEDecoder CTC = decoders.CTC Sequence = decoders.Sequence
tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/decoders/__init__.py", "repo_id": "tokenizers", "token_count": 128 }
228
# Generated content DO NOT EDIT class PostProcessor: """ Base class for all post-processors This class is not supposed to be instantiated directly. Instead, any implementation of a PostProcessor will return an instance of this class when instantiated. """ def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class BertProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Bert model: - a SEP token - a CLS token Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id """ def __init__(self, sep, cls): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class ByteLevel(PostProcessor): """ This post-processor takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. If you don't want the offsets to include these whitespaces, then this PostProcessor must be used. Args: trim_offsets (:obj:`bool`): Whether to trim the whitespaces from the produced offsets. """ def __init__(self, trim_offsets=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class RobertaProcessing(PostProcessor): """ This post-processor takes care of adding the special tokens needed by a Roberta model: - a SEP token - a CLS token It also takes care of trimming the offsets. By default, the ByteLevel BPE might include whitespaces in the produced tokens. 
If you don't want the offsets to include these whitespaces, then this PostProcessor should be initialized with :obj:`trim_offsets=True` Args: sep (:obj:`Tuple[str, int]`): A tuple with the string representation of the SEP token, and its id cls (:obj:`Tuple[str, int]`): A tuple with the string representation of the CLS token, and its id trim_offsets (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to trim the whitespaces from the produced offsets. add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether the add_prefix_space option was enabled during pre-tokenization. This is relevant because it defines the way the offsets are trimmed out. """ def __init__(self, sep, cls, trim_offsets=True, add_prefix_space=True): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class Sequence(PostProcessor): """ Sequence Processor Args: processors (:obj:`List[PostProcessor]`) The processors that need to be chained """ def __init__(self, processors): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass class TemplateProcessing(PostProcessor): """ Provides a way to specify templates in order to add the special tokens to each input sequence as relevant. Let's take :obj:`BERT` tokenizer as an example. It uses two special tokens, used to delimitate each sequence. :obj:`[CLS]` is always used at the beginning of the first sequence, and :obj:`[SEP]` is added at the end of both the first, and the pair sequences. The final result looks like this: - Single sequence: :obj:`[CLS] Hello there [SEP]` - Pair sequences: :obj:`[CLS] My name is Anthony [SEP] What is my name? [SEP]` With the type ids as following:: [CLS] ... [SEP] ... [SEP] 0 0 0 1 1 You can achieve such behavior using a TemplateProcessing:: TemplateProcessing( single="[CLS] $0 [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[("[CLS]", 1), ("[SEP]", 0)], ) In this example, each input sequence is identified using a ``$`` construct. This identifier lets us specify each input sequence, and the type_id to use. When nothing is specified, it uses the default values. 
Here are the different ways to specify it: - Specifying the sequence, with default ``type_id == 0``: ``$A`` or ``$B`` - Specifying the `type_id` with default ``sequence == A``: ``$0``, ``$1``, ``$2``, ... - Specifying both: ``$A:0``, ``$B:1``, ... The same construct is used for special tokens: ``<identifier>(:<type_id>)?``. **Warning**: You must ensure that you are giving the correct tokens/ids as these will be added to the Encoding without any further check. If the given ids correspond to something totally different in a `Tokenizer` using this `PostProcessor`, it might lead to unexpected results. Args: single (:obj:`Template`): The template used for single sequences pair (:obj:`Template`): The template used when both sequences are specified special_tokens (:obj:`Tokens`): The list of special tokens used in each sequences Types: Template (:obj:`str` or :obj:`List`): - If a :obj:`str` is provided, the whitespace is used as delimiter between tokens - If a :obj:`List[str]` is provided, a list of tokens Tokens (:obj:`List[Union[Tuple[int, str], Tuple[str, int], dict]]`): - A :obj:`Tuple` with both a token and its associated ID, in any order - A :obj:`dict` with the following keys: - "id": :obj:`str` => The special token id, as specified in the Template - "ids": :obj:`List[int]` => The associated IDs - "tokens": :obj:`List[str]` => The associated tokens The given dict expects the provided :obj:`ids` and :obj:`tokens` lists to have the same length. """ def __init__(self, single, pair, special_tokens): pass def num_special_tokens_to_add(self, is_pair): """ Return the number of special tokens that would be added for single/pair sentences. Args: is_pair (:obj:`bool`): Whether the input would be a pair of sequences Returns: :obj:`int`: The number of tokens to add """ pass def process(self, encoding, pair=None, add_special_tokens=True): """ Post-process the given encodings, generating the final one Args: encoding (:class:`~tokenizers.Encoding`): The encoding for the first sequence pair (:class:`~tokenizers.Encoding`, `optional`): The encoding for the pair sequence add_special_tokens (:obj:`bool`): Whether to add the special tokens Return: :class:`~tokenizers.Encoding`: The final encoding """ pass
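As a concrete illustration of the template syntax described above, here is a minimal sketch of wiring up the classic BERT-style processing; the special-token ids (1 and 2) are illustrative assumptions and should be looked up from the actual vocabulary with `Tokenizer.token_to_id`:

from tokenizers.processors import TemplateProcessing

# The ids below are placeholders; fetch the real ones from your tokenizer's vocabulary.
post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)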
tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/processors/__init__.pyi", "repo_id": "tokenizers", "token_count": 4779 }
229
use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::{Arc, RwLock}; use crate::token::PyToken; use crate::trainers::PyTrainer; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::{Deserialize, Serialize}; use tk::models::bpe::{BpeBuilder, Merges, Vocab, BPE}; use tk::models::unigram::Unigram; use tk::models::wordlevel::WordLevel; use tk::models::wordpiece::{WordPiece, WordPieceBuilder}; use tk::models::ModelWrapper; use tk::{Model, Token}; use tokenizers as tk; use super::error::{deprecation_warning, ToPyResult}; /// Base class for all models /// /// The model represents the actual tokenization algorithm. This is the part that /// will contain and manage the learned vocabulary. /// /// This class cannot be constructed directly. Please use one of the concrete models. #[pyclass(module = "tokenizers.models", name = "Model", subclass)] #[derive(Clone, Serialize, Deserialize)] pub struct PyModel { #[serde(flatten)] pub model: Arc<RwLock<ModelWrapper>>, } impl PyModel { pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match *self.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => Py::new(py, (PyBPE {}, base))?.into_py(py), ModelWrapper::WordPiece(_) => Py::new(py, (PyWordPiece {}, base))?.into_py(py), ModelWrapper::WordLevel(_) => Py::new(py, (PyWordLevel {}, base))?.into_py(py), ModelWrapper::Unigram(_) => Py::new(py, (PyUnigram {}, base))?.into_py(py), }) } } impl Model for PyModel { type Trainer = PyTrainer; fn tokenize(&self, tokens: &str) -> tk::Result<Vec<Token>> { self.model.read().unwrap().tokenize(tokens) } fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } fn get_vocab(&self) -> HashMap<String, u32> { self.model.read().unwrap().get_vocab() } fn get_vocab_size(&self) -> usize { self.model.read().unwrap().get_vocab_size() } fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> { self.model.read().unwrap().save(folder, name) } fn get_trainer(&self) -> Self::Trainer { self.model.read().unwrap().get_trainer().into() } } impl<I> From<I> for PyModel where I: Into<ModelWrapper>, { fn from(model: I) -> Self { Self { model: Arc::new(RwLock::new(model.into())), } } } #[pymethods] impl PyModel { #[new] #[pyo3(text_signature = None)] fn __new__() -> Self { // Instantiate a default empty model. This doesn't really make sense, but we need // to be able to instantiate an empty model for pickle capabilities. 
PyModel { model: Arc::new(RwLock::new(BPE::default().into())), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.model).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Model: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.model = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Model: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } /// Tokenize a sequence /// /// Args: /// sequence (:obj:`str`): /// A sequence to tokenize /// /// Returns: /// A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens #[pyo3(text_signature = "(self, sequence)")] fn tokenize(&self, sequence: &str) -> PyResult<Vec<PyToken>> { Ok(ToPyResult(self.model.read().unwrap().tokenize(sequence)) .into_py()? .into_iter() .map(|t| t.into()) .collect()) } /// Get the ID associated to a token /// /// Args: /// token (:obj:`str`): /// A token to convert to an ID /// /// Returns: /// :obj:`int`: The ID associated to the token #[pyo3(text_signature = "(self, tokens)")] fn token_to_id(&self, token: &str) -> Option<u32> { self.model.read().unwrap().token_to_id(token) } /// Get the token associated to an ID /// /// Args: /// id (:obj:`int`): /// An ID to convert to a token /// /// Returns: /// :obj:`str`: The token associated to the ID #[pyo3(text_signature = "(self, id)")] fn id_to_token(&self, id: u32) -> Option<String> { self.model.read().unwrap().id_to_token(id) } /// Save the current model /// /// Save the current model in the given folder, using the given prefix for the various /// files that will get created. /// Any file with the same name that already exists in this folder will be overwritten. /// /// Args: /// folder (:obj:`str`): /// The path to the target folder in which to save the various files /// /// prefix (:obj:`str`, `optional`): /// An optional prefix, used to prefix each file name /// /// Returns: /// :obj:`List[str]`: The list of saved files #[pyo3(text_signature = "(self, folder, prefix)")] fn save<'a>( &self, py: Python<'_>, folder: &str, mut prefix: Option<&'a str>, name: Option<&'a str>, ) -> PyResult<Vec<String>> { if name.is_some() { deprecation_warning( py, "0.10.0", "Parameter `name` of Model.save has been renamed `prefix`", )?; if prefix.is_none() { prefix = name; } } let saved: PyResult<Vec<_>> = ToPyResult(self.model.read().unwrap().save(Path::new(folder), prefix)).into(); Ok(saved? .into_iter() .map(|path| path.to_string_lossy().into_owned()) .collect()) } /// Get the associated :class:`~tokenizers.trainers.Trainer` /// /// Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this /// :class:`~tokenizers.models.Model`. 
/// /// Returns: /// :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model #[pyo3(text_signature = "(self)")] fn get_trainer(&self, py: Python<'_>) -> PyResult<PyObject> { PyTrainer::from(self.model.read().unwrap().get_trainer()).get_as_subtype(py) } } /// An implementation of the BPE (Byte-Pair Encoding) algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// merges (:obj:`List[Tuple[str, str]]`, `optional`): /// A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` /// /// cache_capacity (:obj:`int`, `optional`): /// The number of words that the BPE cache can contain. The cache allows /// to speed-up the process by keeping the result of the merge operations /// for a number of words. /// /// dropout (:obj:`float`, `optional`): /// A float between 0 and 1 that represents the BPE dropout to use. /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// continuing_subword_prefix (:obj:`str`, `optional`): /// The prefix to attach to subword units that don't represent a beginning of word. /// /// end_of_word_suffix (:obj:`str`, `optional`): /// The suffix to attach to subword units that represent an end of word. /// /// fuse_unk (:obj:`bool`, `optional`): /// Whether to fuse any subsequent unknown tokens into a single one /// /// byte_fallback (:obj:`bool`, `optional`): /// Whether to use spm byte-fallback trick (defaults to False) #[pyclass(extends=PyModel, module = "tokenizers.models", name = "BPE")] pub struct PyBPE {} impl PyBPE { fn with_builder(mut builder: BpeBuilder, kwargs: Option<&PyDict>) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "cache_capacity" => builder = builder.cache_capacity(value.extract()?), "dropout" => { if let Some(dropout) = value.extract()? { builder = builder.dropout(dropout); } } "unk_token" => { if let Some(unk) = value.extract()? { builder = builder.unk_token(unk); } } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(value.extract()?) } "end_of_word_suffix" => builder = builder.end_of_word_suffix(value.extract()?), "fuse_unk" => builder = builder.fuse_unk(value.extract()?), "byte_fallback" => builder = builder.byte_fallback(value.extract()?), _ => println!("Ignored unknown kwarg option {}", key), }; } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing BPE: {}", e ))), Ok(bpe) => Ok((PyBPE {}, bpe.into())), } } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); let model = super_.model.read().unwrap(); if let ModelWrapper::$variant(ref mo) = *model { mo.$($name)+ } else { unreachable!() } }}; } macro_rules! 
setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); let mut model = super_.model.write().unwrap(); if let ModelWrapper::$variant(ref mut mo) = *model { mo.$name = $value; } }}; } #[derive(FromPyObject)] enum PyVocab<'a> { Vocab(Vocab), Filename(&'a str), } #[derive(FromPyObject)] enum PyMerges<'a> { Merges(Merges), Filename(&'a str), } #[pymethods] impl PyBPE { #[getter] fn get_dropout(self_: PyRef<Self>) -> Option<f32> { getter!(self_, BPE, dropout) } #[setter] fn set_dropout(self_: PyRef<Self>, dropout: Option<f32>) { setter!(self_, BPE, dropout, dropout); } #[getter] fn get_unk_token(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: Option<String>) { setter!(self_, BPE, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix( self_: PyRef<Self>, continuing_subword_prefix: Option<String>, ) { setter!( self_, BPE, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_end_of_word_suffix(self_: PyRef<Self>) -> Option<String> { getter!(self_, BPE, end_of_word_suffix.clone()) } #[setter] fn set_end_of_word_suffix(self_: PyRef<Self>, end_of_word_suffix: Option<String>) { setter!(self_, BPE, end_of_word_suffix, end_of_word_suffix); } #[getter] fn get_fuse_unk(self_: PyRef<Self>) -> bool { getter!(self_, BPE, fuse_unk) } #[setter] fn set_fuse_unk(self_: PyRef<Self>, fuse_unk: bool) { setter!(self_, BPE, fuse_unk, fuse_unk); } #[getter] fn get_byte_fallback(self_: PyRef<Self>) -> bool { getter!(self_, BPE, byte_fallback) } #[setter] fn set_byte_fallback(self_: PyRef<Self>, byte_fallback: bool) { setter!(self_, BPE, byte_fallback, byte_fallback); } #[new] #[pyo3( signature = (vocab=None, merges=None, **kwargs), text_signature = "(self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, merges: Option<PyMerges>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if (vocab.is_some() && merges.is_none()) || (vocab.is_none() && merges.is_some()) { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both specified", )); } let mut builder = BPE::builder(); if let (Some(vocab), Some(merges)) = (vocab, merges) { match (vocab, merges) { (PyVocab::Vocab(vocab), PyMerges::Merges(merges)) => { builder = builder.vocab_and_merges(vocab, merges); } (PyVocab::Filename(vocab_filename), PyMerges::Filename(merges_filename)) => { deprecation_warning( py, "0.9.0", "BPE.__init__ will not create from files anymore, try `BPE.from_file` instead", )?; builder = builder.files(vocab_filename.to_string(), merges_filename.to_string()); } _ => { return Err(exceptions::PyValueError::new_err( "`vocab` and `merges` must be both be from memory or both filenames", )); } } } PyBPE::with_builder(builder, kwargs) } /// Read a :obj:`vocab.json` and a :obj:`merges.txt` files /// /// This method provides a way to read and parse the content of these files, /// returning the relevant data structures. If you want to instantiate some BPE models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// A :obj:`Tuple` with the vocab and the merges: /// The vocabulary and merges loaded into memory #[staticmethod] #[pyo3(text_signature = "(self, vocab, merges)")] fn read_file(vocab: &str, merges: &str) -> PyResult<(Vocab, Merges)> { BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!( "Error while reading vocab & merges files: {}", e )) }) } /// Instantiate a BPE model from the given files. /// /// This method is roughly equivalent to doing:: /// /// vocab, merges = BPE.read_file(vocab_filename, merges_filename) /// bpe = BPE(vocab, merges) /// /// If you don't need to keep the :obj:`vocab, merges` values lying around, /// this method is more optimized than manually calling /// :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// merges (:obj:`str`): /// The path to a :obj:`merges.txt` file /// /// Returns: /// :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files #[classmethod] #[pyo3(signature = (vocab, merges, **kwargs))] #[pyo3(text_signature = "(cls, vocab, merge, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, merges: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let (vocab, merges) = BPE::read_file(vocab, merges).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading BPE files: {}", e)) })?; Py::new( py, PyBPE::new( py, Some(PyVocab::Vocab(vocab)), Some(PyMerges::Merges(merges)), kwargs, )?, ) } } /// An implementation of the WordPiece algorithm /// /// Args: /// vocab (:obj:`Dict[str, int]`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. /// /// max_input_chars_per_word (:obj:`int`, `optional`): /// The maximum number of characters to authorize in a single word. 
#[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordPiece")] pub struct PyWordPiece {} impl PyWordPiece { fn with_builder( mut builder: WordPieceBuilder, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { if let Some(kwargs) = kwargs { for (key, val) in kwargs { let key: &str = key.extract()?; match key { "unk_token" => { builder = builder.unk_token(val.extract()?); } "max_input_chars_per_word" => { builder = builder.max_input_chars_per_word(val.extract()?); } "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(val.extract()?); } _ => println!("Ignored unknown kwargs option {}", key), } } } match builder.build() { Err(e) => Err(exceptions::PyException::new_err(format!( "Error while initializing WordPiece: {}", e ))), Ok(wordpiece) => Ok((PyWordPiece {}, wordpiece.into())), } } } #[pymethods] impl PyWordPiece { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordPiece, unk_token, unk_token); } #[getter] fn get_continuing_subword_prefix(self_: PyRef<Self>) -> String { getter!(self_, WordPiece, continuing_subword_prefix.clone()) } #[setter] fn set_continuing_subword_prefix(self_: PyRef<Self>, continuing_subword_prefix: String) { setter!( self_, WordPiece, continuing_subword_prefix, continuing_subword_prefix ); } #[getter] fn get_max_input_chars_per_word(self_: PyRef<Self>) -> usize { getter!(self_, WordPiece, max_input_chars_per_word) } #[setter] fn set_max_input_chars_per_word(self_: PyRef<Self>, max: usize) { setter!(self_, WordPiece, max_input_chars_per_word, max); } #[new] #[pyo3(signature = (vocab=None, **kwargs), text_signature = "(self, vocab, unk_token, max_input_chars_per_word)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordPiece::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordPiece.__init__ will not create from files anymore, try `WordPiece.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } } } PyWordPiece::with_builder(builder, kwargs) } /// Read a :obj:`vocab.txt` file /// /// This method provides a way to read and parse the content of a standard `vocab.txt` /// file as used by the WordPiece Model, returning the relevant data structures. If you /// want to instantiate some WordPiece models from memory, this method gives you the /// expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) }) } /// Instantiate a WordPiece model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordPiece.read_file(vocab_filename) /// wordpiece = WordPiece(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to /// initialize a :class:`~tokenizers.models.WordPiece` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.txt` file /// /// Returns: /// :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file #[classmethod] #[pyo3(signature = (vocab, **kwargs))] #[pyo3(text_signature = "(vocab, **kwargs)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, kwargs: Option<&PyDict>, ) -> PyResult<Py<Self>> { let vocab = WordPiece::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordPiece file: {}", e)) })?; Py::new( py, PyWordPiece::new(py, Some(PyVocab::Vocab(vocab)), kwargs)?, ) } } /// An implementation of the WordLevel algorithm /// /// Most simple tokenizer model based on mapping tokens to their corresponding id. /// /// Args: /// vocab (:obj:`str`, `optional`): /// A dictionnary of string keys and their ids :obj:`{"am": 0,...}` /// /// unk_token (:obj:`str`, `optional`): /// The unknown token to be used by the model. #[pyclass(extends=PyModel, module = "tokenizers.models", name = "WordLevel")] pub struct PyWordLevel {} #[pymethods] impl PyWordLevel { #[getter] fn get_unk_token(self_: PyRef<Self>) -> String { getter!(self_, WordLevel, unk_token.clone()) } #[setter] fn set_unk_token(self_: PyRef<Self>, unk_token: String) { setter!(self_, WordLevel, unk_token, unk_token); } #[new] #[pyo3(signature = (vocab=None, unk_token = None), text_signature = "(self, vocab, unk_token)")] fn new( py: Python<'_>, vocab: Option<PyVocab>, unk_token: Option<String>, ) -> PyResult<(Self, PyModel)> { let mut builder = WordLevel::builder(); if let Some(vocab) = vocab { match vocab { PyVocab::Vocab(vocab) => { builder = builder.vocab(vocab); } PyVocab::Filename(vocab_filename) => { deprecation_warning( py, "0.9.0", "WordLevel.__init__ will not create from files anymore, \ try `WordLevel.from_file` instead", )?; builder = builder.files(vocab_filename.to_string()); } }; } if let Some(unk_token) = unk_token { builder = builder.unk_token(unk_token); } Ok(( PyWordLevel {}, builder .build() .map_err(|e| exceptions::PyException::new_err(e.to_string()))? .into(), )) } /// Read a :obj:`vocab.json` /// /// This method provides a way to read and parse the content of a vocabulary file, /// returning the relevant data structures. If you want to instantiate some WordLevel models /// from memory, this method gives you the expected input from the standard files. 
/// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` #[staticmethod] #[pyo3(text_signature = "(vocab)")] fn read_file(vocab: &str) -> PyResult<Vocab> { WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) }) } /// Instantiate a WordLevel model from the given file /// /// This method is roughly equivalent to doing:: /// /// vocab = WordLevel.read_file(vocab_filename) /// wordlevel = WordLevel(vocab) /// /// If you don't need to keep the :obj:`vocab` values lying around, this method is /// more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to /// initialize a :class:`~tokenizers.models.WordLevel` /// /// Args: /// vocab (:obj:`str`): /// The path to a :obj:`vocab.json` file /// /// Returns: /// :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file #[classmethod] #[pyo3(signature = (vocab, unk_token = None))] #[pyo3(text_signature = "(vocab, unk_token)")] fn from_file( _cls: &PyType, py: Python, vocab: &str, unk_token: Option<String>, ) -> PyResult<Py<Self>> { let vocab = WordLevel::read_file(vocab).map_err(|e| { exceptions::PyException::new_err(format!("Error while reading WordLevel file: {}", e)) })?; Py::new( py, PyWordLevel::new(py, Some(PyVocab::Vocab(vocab)), unk_token)?, ) } } /// An implementation of the Unigram algorithm /// /// Args: /// vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): /// A list of vocabulary items and their relative score [("am", -0.2442),...] #[pyclass(extends=PyModel, module = "tokenizers.models", name = "Unigram")] pub struct PyUnigram {} #[pymethods] impl PyUnigram { #[new] #[pyo3(text_signature = "(self, vocab, unk_id, byte_fallback)")] fn new( vocab: Option<Vec<(String, f64)>>, unk_id: Option<usize>, byte_fallback: Option<bool>, ) -> PyResult<(Self, PyModel)> { match (vocab, unk_id, byte_fallback) { (Some(vocab), unk_id, byte_fallback) => { let model = Unigram::from(vocab, unk_id, byte_fallback.unwrap_or(false)).map_err(|e| { exceptions::PyException::new_err(format!( "Error while loading Unigram: {}", e )) })?; Ok((PyUnigram {}, model.into())) } (None, None, _) => Ok((PyUnigram {}, Unigram::default().into())), _ => Err(exceptions::PyValueError::new_err( "`vocab` and `unk_id` must be both specified", )), } } } /// Models Module #[pymodule] pub fn models(_py: Python, m: &PyModule) -> PyResult<()> { m.add_class::<PyModel>()?; m.add_class::<PyBPE>()?; m.add_class::<PyWordPiece>()?; m.add_class::<PyWordLevel>()?; m.add_class::<PyUnigram>()?; Ok(()) } #[cfg(test)] mod test { use crate::models::PyModel; use pyo3::prelude::*; use tk::models::bpe::BPE; use tk::models::ModelWrapper; #[test] fn get_subtype() { Python::with_gil(|py| { let py_model = PyModel::from(BPE::default()); let py_bpe = py_model.get_as_subtype(py).unwrap(); assert_eq!("BPE", py_bpe.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let rs_bpe = BPE::default(); let rs_bpe_ser = serde_json::to_string(&rs_bpe).unwrap(); let rs_wrapper: ModelWrapper = rs_bpe.into(); let rs_wrapper_ser = serde_json::to_string(&rs_wrapper).unwrap(); let py_model = PyModel::from(rs_wrapper); let py_ser = serde_json::to_string(&py_model).unwrap(); assert_eq!(py_ser, rs_bpe_ser); assert_eq!(py_ser, rs_wrapper_ser); let py_model: PyModel = serde_json::from_str(&rs_bpe_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => 
(), _ => panic!("Expected the BPE model variant."), }; let py_model: PyModel = serde_json::from_str(&rs_wrapper_ser).unwrap(); match *py_model.model.as_ref().read().unwrap() { ModelWrapper::BPE(_) => (), _ => panic!("Expected the BPE model variant."), }; } }
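For reference, a small sketch of how the models bound above are typically instantiated from the Python side; the toy vocabulary, merges, and file names are assumptions made purely for illustration:

from tokenizers import Tokenizer
from tokenizers.models import BPE

# In-memory construction (toy vocabulary, purely illustrative)
bpe = BPE(vocab={"[UNK]": 0, "a": 1, "b": 2, "ab": 3}, merges=[("a", "b")], unk_token="[UNK]")
tokenizer = Tokenizer(bpe)

# Equivalent loading from files (these paths are assumptions)
# bpe = BPE.from_file("vocab.json", "merges.txt", unk_token="[UNK]")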
tokenizers/bindings/python/src/models.rs/0
{ "file_path": "tokenizers/bindings/python/src/models.rs", "repo_id": "tokenizers", "token_count": 14445 }
230
import json import pickle import pytest from tokenizers.decoders import ( CTC, BPEDecoder, ByteLevel, Decoder, Metaspace, Sequence, WordPiece, ByteFallback, Replace, Strip, Fuse, ) class TestByteLevel: def test_instantiate(self): assert ByteLevel() is not None assert isinstance(ByteLevel(), Decoder) assert isinstance(ByteLevel(), ByteLevel) assert isinstance(pickle.loads(pickle.dumps(ByteLevel())), ByteLevel) def test_decoding(self): decoder = ByteLevel() assert decoder.decode(["My", "Ġname", "Ġis", "ĠJohn"]) == "My name is John" def test_manual_reload(self): byte_level = ByteLevel() state = json.loads(byte_level.__getstate__()) reloaded = ByteLevel(**state) assert isinstance(reloaded, ByteLevel) class TestReplace: def test_instantiate(self): assert Replace("_", " ") is not None assert isinstance(Replace("_", " "), Decoder) assert isinstance(Replace("_", " "), Replace) # assert isinstance(pickle.loads(pickle.dumps(Replace("_", " "))), Replace) def test_decoding(self): decoder = Replace("_", " ") assert decoder.decode(["My", "_name", "_is", "_John"]) == "My name is John" class TestWordPiece: def test_instantiate(self): assert WordPiece() is not None assert WordPiece(prefix="__") is not None assert WordPiece(cleanup=True) is not None assert isinstance(WordPiece(), Decoder) assert isinstance(WordPiece(), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece())), WordPiece) def test_decoding(self): decoder = WordPiece() assert decoder.decode(["My", "na", "##me", "is", "Jo", "##hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "##hn"]) == "I'm John" decoder = WordPiece(prefix="__", cleanup=False) assert decoder.decode(["My", "na", "__me", "is", "Jo", "__hn"]) == "My name is John" assert decoder.decode(["I", "'m", "Jo", "__hn"]) == "I 'm John" def test_can_modify(self): decoder = WordPiece(prefix="$$", cleanup=False) assert decoder.prefix == "$$" assert decoder.cleanup == False # Modify these decoder.prefix = "__" assert decoder.prefix == "__" decoder.cleanup = True assert decoder.cleanup == True class TestByteFallback: def test_instantiate(self): assert ByteFallback() is not None assert isinstance(ByteFallback(), Decoder) assert isinstance(ByteFallback(), ByteFallback) assert isinstance(pickle.loads(pickle.dumps(ByteFallback())), ByteFallback) def test_decoding(self): decoder = ByteFallback() assert decoder.decode(["My", " na", "me"]) == "My name" assert decoder.decode(["<0x61>"]) == "a" assert decoder.decode(["<0xE5>"]) == "�" assert decoder.decode(["<0xE5>", "<0x8f>"]) == "��" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>"]) == "叫" assert decoder.decode(["<0xE5>", "<0x8f>", "a"]) == "��a" assert decoder.decode(["<0xE5>", "<0x8f>", "<0xab>", "a"]) == "叫a" class TestFuse: def test_instantiate(self): assert Fuse() is not None assert isinstance(Fuse(), Decoder) assert isinstance(Fuse(), Fuse) assert isinstance(pickle.loads(pickle.dumps(Fuse())), Fuse) def test_decoding(self): decoder = Fuse() assert decoder.decode(["My", " na", "me"]) == "My name" class TestStrip: def test_instantiate(self): assert Strip(left=0, right=0) is not None assert isinstance(Strip(content="_", left=0, right=0), Decoder) assert isinstance(Strip(content="_", left=0, right=0), Strip) assert isinstance(pickle.loads(pickle.dumps(Strip(content="_", left=0, right=0))), Strip) def test_decoding(self): decoder = Strip(content="_", left=1, right=0) assert decoder.decode(["_My", " na", "me", " _-", "__-"]) == "My name _-_-" class TestMetaspace: def test_instantiate(self): assert Metaspace() is not 
None assert Metaspace(replacement="-") is not None with pytest.raises(ValueError, match="expected a string of length 1"): Metaspace(replacement="") assert Metaspace(prepend_scheme="always") is not None assert isinstance(Metaspace(), Decoder) assert isinstance(Metaspace(), Metaspace) assert isinstance(pickle.loads(pickle.dumps(Metaspace())), Metaspace) def test_decoding(self): decoder = Metaspace() assert decoder.decode(["▁My", "▁name", "▁is", "▁John"]) == "My name is John" decoder = Metaspace(replacement="-", prepend_scheme="never") assert decoder.decode(["-My", "-name", "-is", "-John"]) == " My name is John" def test_can_modify(self): decoder = Metaspace(replacement="*", prepend_scheme="never") assert decoder.replacement == "*" assert decoder.prepend_scheme == "never" # Modify these decoder.replacement = "&" assert decoder.replacement == "&" decoder.prepend_scheme = "first" assert decoder.prepend_scheme == "first" class TestBPEDecoder: def test_instantiate(self): assert BPEDecoder() is not None assert BPEDecoder(suffix="_") is not None assert isinstance(BPEDecoder(), Decoder) assert isinstance(BPEDecoder(), BPEDecoder) assert isinstance(pickle.loads(pickle.dumps(BPEDecoder())), BPEDecoder) def test_decoding(self): decoder = BPEDecoder() assert decoder.decode(["My</w>", "na", "me</w>", "is</w>", "Jo", "hn</w>"]) == "My name is John" decoder = BPEDecoder(suffix="_") assert decoder.decode(["My_", "na", "me_", "is_", "Jo", "hn_"]) == "My name is John" def test_can_modify(self): decoder = BPEDecoder(suffix="123") assert decoder.suffix == "123" # Modify these decoder.suffix = "</w>" assert decoder.suffix == "</w>" class TestCTCDecoder: def test_instantiate(self): assert CTC() is not None assert CTC(pad_token="[PAD]") is not None assert isinstance(CTC(), Decoder) assert isinstance(CTC(), CTC) assert isinstance(pickle.loads(pickle.dumps(CTC())), CTC) def test_decoding(self): decoder = CTC() assert ( decoder.decode(["<pad>", "<pad>", "h", "e", "e", "l", "l", "<pad>", "l", "o", "o", "o", "<pad>"]) == "hello" ) decoder = CTC(pad_token="[PAD]") assert ( decoder.decode(["[PAD]", "[PAD]", "h", "e", "e", "l", "l", "[PAD]", "l", "o", "o", "o", "[PAD]"]) == "hello" ) def test_can_modify(self): decoder = CTC(pad_token="[PAD]") assert decoder.pad_token == "[PAD]" assert decoder.word_delimiter_token == "|" assert decoder.cleanup == True # Modify these decoder.pad_token = "{pad}" assert decoder.pad_token == "{pad}" decoder.word_delimiter_token = "_" assert decoder.word_delimiter_token == "_" decoder.cleanup = False assert decoder.cleanup == False class TestSequenceDecoder: def test_instantiate(self): assert Sequence([]) is not None assert Sequence([CTC()]) is not None assert isinstance(Sequence([]), Decoder) assert isinstance(Sequence([]), Sequence) serialized = pickle.dumps(Sequence([])) assert isinstance(pickle.loads(serialized), Sequence) def test_decoding(self): decoder = Sequence([CTC(), Metaspace()]) initial = ["▁", "▁", "H", "H", "i", "i", "▁", "y", "o", "u"] expected = "Hi you" assert decoder.decode(initial) == expected
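The tests above exercise each decoder in isolation; in practice a decoder is attached to a Tokenizer so that `decode` undoes the pre-tokenization. A rough sketch, assuming the usual ByteLevel pre-tokenizer/decoder pairing (the empty BPE model is only there to make the example self-contained):

from tokenizers import Tokenizer, decoders, pre_tokenizers
from tokenizers.models import BPE

tokenizer = Tokenizer(BPE())
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()

# ids = tokenizer.encode("My name is John").ids
# print(tokenizer.decode(ids))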
tokenizers/bindings/python/tests/bindings/test_decoders.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_decoders.py", "repo_id": "tokenizers", "token_count": 3527 }
231
from tokenizers import CharBPETokenizer

from ..utils import data_dir, multiprocessing_with_parallelism, openai_files


class TestCharBPETokenizer:
    def test_basic_encode(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
        output = tokenizer.encode("My name is John", "pair")

        assert output.ids == [0, 253, 1362, 544, 0, 7, 12662, 2688]
        assert output.tokens == [
            "<unk>",
            "y</w>",
            "name</w>",
            "is</w>",
            "<unk>",
            "o",
            "hn</w>",
            "pair</w>",
        ]
        assert output.offsets == [
            (0, 1),
            (1, 2),
            (3, 7),
            (8, 10),
            (11, 12),
            (12, 13),
            (13, 15),
            (0, 4),
        ]
        assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 1]

    def test_lowercase(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
        output = tokenizer.encode("My name is John", "pair", add_special_tokens=False)
        assert output.ids == [547, 1362, 544, 2476, 2688]
        assert output.tokens == ["my</w>", "name</w>", "is</w>", "john</w>", "pair</w>"]
        assert output.offsets == [(0, 2), (3, 7), (8, 10), (11, 15), (0, 4)]
        assert output.type_ids == [0, 0, 0, 0, 1]

    def test_decoding(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"], lowercase=True)
        decoded = tokenizer.decode(tokenizer.encode("my name is john").ids)
        assert decoded == "my name is john"

    def test_multiprocessing_with_parallelism(self, openai_files):
        tokenizer = CharBPETokenizer.from_file(openai_files["vocab"], openai_files["merges"])
        multiprocessing_with_parallelism(tokenizer, False)
        multiprocessing_with_parallelism(tokenizer, True)

    def test_train_from_iterator(self):
        text = ["A first sentence", "Another sentence", "And a last one"]
        tokenizer = CharBPETokenizer()
        tokenizer.train_from_iterator(text, show_progress=False)

        output = tokenizer.encode("A sentence")
        assert output.tokens == ["A</w>", "sentence</w>"]
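A minimal sketch of persisting a tokenizer trained with this implementation and reloading it later; the output file name is an assumption chosen for illustration:

from tokenizers import CharBPETokenizer, Tokenizer

tokenizer = CharBPETokenizer()
tokenizer.train_from_iterator(["A first sentence", "Another sentence"], show_progress=False)

tokenizer.save("char-bpe.json")  # serializes the full tokenizer to a single JSON file
reloaded = Tokenizer.from_file("char-bpe.json")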
tokenizers/bindings/python/tests/implementations/test_char_bpe.py/0
{ "file_path": "tokenizers/bindings/python/tests/implementations/test_char_bpe.py", "repo_id": "tokenizers", "token_count": 1094 }
232
# Tokenizer

<tokenizerslangcontent>
<python>
## Tokenizer

[[autodoc]] tokenizers.Tokenizer
    - all
    - decoder
    - model
    - normalizer
    - padding
    - post_processor
    - pre_tokenizer
    - truncation
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/tokenizer.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/tokenizer.mdx", "repo_id": "tokenizers", "token_count": 156 }
233
.highlight .c1, .highlight .sd {
    color: #999;
}

.highlight .nn, .highlight .k, .highlight .s1, .highlight .nb, .highlight .bp, .highlight .kc, .highlight .kt {
    color: #FB8D68;
}

.highlight .kn, .highlight .nv, .highlight .s2, .highlight .ow, .highlight .kd, .highlight .kr, .highlight .s {
    color: #6670FF;
}

.highlight .gp {
    color: #FB8D68;
}
tokenizers/docs/source/_static/css/code-snippets.css/0
{ "file_path": "tokenizers/docs/source/_static/css/code-snippets.css", "repo_id": "tokenizers", "token_count": 166 }
234
Quicktour ==================================================================================================== Let's have a quick look at the 🤗 Tokenizers library features. The library provides an implementation of today's most used tokenizers that is both easy to use and blazing fast. .. only:: python It can be used to instantiate a :ref:`pretrained tokenizer <pretrained>` but we will start our quicktour by building one from scratch and see how we can train it. Build a tokenizer from scratch ---------------------------------------------------------------------------------------------------- To illustrate how fast the 🤗 Tokenizers library is, let's train a new tokenizer on `wikitext-103 <https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/>`__ (516M of text) in just a few seconds. First things first, you will need to download this dataset and unzip it with: .. code-block:: bash wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip unzip wikitext-103-raw-v1.zip Training the tokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. entities:: python BpeTrainer :class:`~tokenizers.trainers.BpeTrainer` vocab_size :obj:`vocab_size` min_frequency :obj:`min_frequency` special_tokens :obj:`special_tokens` unk_token :obj:`unk_token` pad_token :obj:`pad_token` .. entities:: rust BpeTrainer :rust_struct:`~tokenizers::models::bpe::BpeTrainer` vocab_size :obj:`vocab_size` min_frequency :obj:`min_frequency` special_tokens :obj:`special_tokens` unk_token :obj:`unk_token` pad_token :obj:`pad_token` .. entities:: node BpeTrainer BpeTrainer vocab_size :obj:`vocabSize` min_frequency :obj:`minFrequency` special_tokens :obj:`specialTokens` unk_token :obj:`unkToken` pad_token :obj:`padToken` In this tour, we will build and train a Byte-Pair Encoding (BPE) tokenizer. For more information about the different type of tokenizers, check out this `guide <https://huggingface.co/docs/transformers/main/en/tokenizer_summary#summary-of-the-tokenizers>`__ in the 🤗 Transformers documentation. Here, training the tokenizer means it will learn merge rules by: - Start with all the characters present in the training corpus as tokens. - Identify the most common pair of tokens and merge it into one token. - Repeat until the vocabulary (e.g., the number of tokens) has reached the size we want. The main API of the library is the :entity:`class` :entity:`Tokenizer`, here is how we instantiate one with a BPE model: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_tokenizer :end-before: END init_tokenizer :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_tokenizer :end-before: END quicktour_init_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_tokenizer :end-before: END init_tokenizer :dedent: 4 To train our tokenizer on the wikitext files, we will need to instantiate a `trainer`, in this case a :entity:`BpeTrainer` .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_trainer :end-before: END init_trainer :dedent: 8 .. only:: rust .. 
literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_trainer :end-before: END quicktour_init_trainer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_trainer :end-before: END init_trainer :dedent: 4 We can set the training arguments like :entity:`vocab_size` or :entity:`min_frequency` (here left at their default values of 30,000 and 0) but the most important part is to give the :entity:`special_tokens` we plan to use later on (they are not used at all during training) so that they get inserted in the vocabulary. .. note:: The order in which you write the special tokens list matters: here :obj:`"[UNK]"` will get the ID 0, :obj:`"[CLS]"` will get the ID 1 and so forth. We could train our tokenizer right now, but it wouldn't be optimal. Without a pre-tokenizer that will split our inputs into words, we might get tokens that overlap several words: for instance we could get an :obj:`"it is"` token since those two words often appear next to each other. Using a pre-tokenizer will ensure no token is bigger than a word returned by the pre-tokenizer. Here we want to train a subword BPE tokenizer, and we will use the easiest pre-tokenizer possible by splitting on whitespace. .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_pretok :end-before: END init_pretok :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_pretok :end-before: END quicktour_init_pretok :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_pretok :end-before: END init_pretok :dedent: 4 Now, we can just call the :entity:`Tokenizer.train` method with any list of files we want to use: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START train :end-before: END train :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_train :end-before: END quicktour_train :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START train :end-before: END train :dedent: 4 This should only take a few seconds to train our tokenizer on the full wikitext dataset! To save the tokenizer in one file that contains all its configuration and vocabulary, just use the :entity:`Tokenizer.save` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START save :end-before: END save :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_save :end-before: END quicktour_save :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START save :end-before: END save :dedent: 4 and you can reload your tokenizer from that file with the :entity:`Tokenizer.from_file` :entity:`classmethod`: .. only:: python .. 
literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 12 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_reload_tokenizer :end-before: END quicktour_reload_tokenizer :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START reload_tokenizer :end-before: END reload_tokenizer :dedent: 4 Using the tokenizer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now that we have trained a tokenizer, we can use it on any text we want with the :entity:`Tokenizer.encode` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START encode :end-before: END encode :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_encode :end-before: END quicktour_encode :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START encode :end-before: END encode :dedent: 4 This applied the full pipeline of the tokenizer on the text, returning an :entity:`Encoding` object. To learn more about this pipeline, and how to apply (or customize) parts of it, check out :doc:`this page <pipeline>`. This :entity:`Encoding` object then has all the attributes you need for your deep learning model (or other). The :obj:`tokens` attribute contains the segmentation of your text in tokens: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_tokens :end-before: END print_tokens :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_tokens :end-before: END quicktour_print_tokens :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_tokens :end-before: END print_tokens :dedent: 4 Similarly, the :obj:`ids` attribute will contain the index of each of those tokens in the tokenizer's vocabulary: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_ids :end-before: END print_ids :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_ids :end-before: END quicktour_print_ids :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_ids :end-before: END print_ids :dedent: 4 An important feature of the 🤗 Tokenizers library is that it comes with full alignment tracking, meaning you can always get the part of your original sentence that corresponds to a given token. Those are stored in the :obj:`offsets` attribute of our :entity:`Encoding` object. For instance, let's assume we would want to find back what caused the :obj:`"[UNK]"` token to appear, which is the token at index 9 in the list, we can just ask for the offset at the index: .. only:: python .. 
literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_offsets :end-before: END print_offsets :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_offsets :end-before: END quicktour_print_offsets :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_offsets :end-before: END print_offsets :dedent: 4 and those are the indices that correspond to the emoji in the original sentence: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START use_offsets :end-before: END use_offsets :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_use_offsets :end-before: END quicktour_use_offsets :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START use_offsets :end-before: END use_offsets :dedent: 4 Post-processing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We might want our tokenizer to automatically add special tokens, like :obj:`"[CLS]"` or :obj:`"[SEP]"`. To do this, we use a post-processor. :entity:`TemplateProcessing` is the most commonly used, you just have to specify a template for the processing of single sentences and pairs of sentences, along with the special tokens and their IDs. When we built our tokenizer, we set :obj:`"[CLS]"` and :obj:`"[SEP]"` in positions 1 and 2 of our list of special tokens, so this should be their IDs. To double-check, we can use the :entity:`Tokenizer.token_to_id` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START check_sep :end-before: END check_sep :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_check_sep :end-before: END quicktour_check_sep :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START check_sep :end-before: END check_sep :dedent: 4 Here is how we can set the post-processing to give us the traditional BERT inputs: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START init_template_processing :end-before: END init_template_processing :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_init_template_processing :end-before: END quicktour_init_template_processing :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START init_template_processing :end-before: END init_template_processing :dedent: 4 Let's go over this snippet of code in more details. First we specify the template for single sentences: those should have the form :obj:`"[CLS] $A [SEP]"` where :obj:`$A` represents our sentence. Then, we specify the template for sentence pairs, which should have the form :obj:`"[CLS] $A [SEP] $B [SEP]"` where :obj:`$A` represents the first sentence and :obj:`$B` the second one. 
The :obj:`:1` added in the template represent the `type IDs` we want for each part of our input: it defaults to 0 for everything (which is why we don't have :obj:`$A:0`) and here we set it to 1 for the tokens of the second sentence and the last :obj:`"[SEP]"` token. Lastly, we specify the special tokens we used and their IDs in our tokenizer's vocabulary. To check out this worked properly, let's try to encode the same sentence as before: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_special_tokens :end-before: END print_special_tokens :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_special_tokens :end-before: END quicktour_print_special_tokens :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_special_tokens :end-before: END print_special_tokens :dedent: 4 To check the results on a pair of sentences, we just pass the two sentences to :entity:`Tokenizer.encode`: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_special_tokens_pair :end-before: END print_special_tokens_pair :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_special_tokens_pair :end-before: END quicktour_print_special_tokens_pair :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_special_tokens_pair :end-before: END print_special_tokens_pair :dedent: 4 You can then check the type IDs attributed to each token is correct with .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START print_type_ids :end-before: END print_type_ids :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_print_type_ids :end-before: END quicktour_print_type_ids :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START print_type_ids :end-before: END print_type_ids :dedent: 4 If you save your tokenizer with :entity:`Tokenizer.save`, the post-processor will be saved along. Encoding multiple sentences in a batch ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To get the full speed of the 🤗 Tokenizers library, it's best to process your texts by batches by using the :entity:`Tokenizer.encode_batch` method: .. only:: python .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py :language: python :start-after: START encode_batch :end-before: END encode_batch :dedent: 8 .. only:: rust .. literalinclude:: ../../tokenizers/tests/documentation.rs :language: rust :start-after: START quicktour_encode_batch :end-before: END quicktour_encode_batch :dedent: 4 .. only:: node .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts :language: javascript :start-after: START encode_batch :end-before: END encode_batch :dedent: 4 The output is then a list of :entity:`Encoding` objects like the ones we saw before. 
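The snippets referenced above live in separate test files; as a rough illustration (the example sentences are made up), the batch call looks like this in Python:

.. code-block:: python

    output = tokenizer.encode_batch(
        ["Hello, y'all!", "How are you 😁 ?", "I hope you are doing well."]
    )
    print(output[0].tokens)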
You can process as many texts together as you like, as long as they fit in memory.

To process a batch of sentence pairs, pass two lists to the :entity:`Tokenizer.encode_batch` method: the list of sentences A and the list of sentences B:

.. only:: python

    .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
        :language: python
        :start-after: START encode_batch_pair
        :end-before: END encode_batch_pair
        :dedent: 8

.. only:: rust

    .. literalinclude:: ../../tokenizers/tests/documentation.rs
        :language: rust
        :start-after: START quicktour_encode_batch_pair
        :end-before: END quicktour_encode_batch_pair
        :dedent: 4

.. only:: node

    .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
        :language: javascript
        :start-after: START encode_batch_pair
        :end-before: END encode_batch_pair
        :dedent: 4

When encoding multiple sentences, you can automatically pad the outputs to the longest sentence present by using :entity:`Tokenizer.enable_padding`, with the :entity:`pad_token` and its ID (whose ID we can double-check with :entity:`Tokenizer.token_to_id` like before):

.. only:: python

    .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
        :language: python
        :start-after: START enable_padding
        :end-before: END enable_padding
        :dedent: 8

.. only:: rust

    .. literalinclude:: ../../tokenizers/tests/documentation.rs
        :language: rust
        :start-after: START quicktour_enable_padding
        :end-before: END quicktour_enable_padding
        :dedent: 4

.. only:: node

    .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
        :language: javascript
        :start-after: START enable_padding
        :end-before: END enable_padding
        :dedent: 4

We can set the :obj:`direction` of the padding (defaults to the right) or a given :obj:`length` if we want to pad every sample to that specific number (here we leave it unset to pad to the size of the longest text).

.. only:: python

    .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
        :language: python
        :start-after: START print_batch_tokens
        :end-before: END print_batch_tokens
        :dedent: 8

.. only:: rust

    .. literalinclude:: ../../tokenizers/tests/documentation.rs
        :language: rust
        :start-after: START quicktour_print_batch_tokens
        :end-before: END quicktour_print_batch_tokens
        :dedent: 4

.. only:: node

    .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
        :language: javascript
        :start-after: START print_batch_tokens
        :end-before: END print_batch_tokens
        :dedent: 4

In this case, the `attention mask` generated by the tokenizer takes the padding into account:

.. only:: python

    .. literalinclude:: ../../bindings/python/tests/documentation/test_quicktour.py
        :language: python
        :start-after: START print_attention_mask
        :end-before: END print_attention_mask
        :dedent: 8

.. only:: rust

    .. literalinclude:: ../../tokenizers/tests/documentation.rs
        :language: rust
        :start-after: START quicktour_print_attention_mask
        :end-before: END quicktour_print_attention_mask
        :dedent: 4

.. only:: node

    .. literalinclude:: ../../bindings/node/examples/documentation/quicktour.test.ts
        :language: javascript
        :start-after: START print_attention_mask
        :end-before: END print_attention_mask
        :dedent: 4

.. _pretrained:

.. only:: python

    Using a pretrained tokenizer
    ------------------------------------------------------------------------------------------------

    You can load any tokenizer from the Hugging Face Hub as long as a `tokenizer.json` file is available in the repository.
    .. code-block:: python

        from tokenizers import Tokenizer

        tokenizer = Tokenizer.from_pretrained("bert-base-uncased")

    Importing a pretrained tokenizer from legacy vocabulary files
    ------------------------------------------------------------------------------------------------

    You can also import a pretrained tokenizer directly, as long as you have its vocabulary file. For instance, here is how to import the classic pretrained BERT tokenizer:

    .. code-block:: python

        from tokenizers import BertWordPieceTokenizer

        tokenizer = BertWordPieceTokenizer("bert-base-uncased-vocab.txt", lowercase=True)

    as long as you have downloaded the file `bert-base-uncased-vocab.txt` with

    .. code-block:: bash

        wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt
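    If everything above went well, the freshly loaded tokenizer can be exercised right away; a small illustrative sketch (the exact tokens depend on the vocabulary file you downloaded):

    .. code-block:: python

        output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
        print(output.tokens)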
tokenizers/docs/source/quicktour.rst/0
{ "file_path": "tokenizers/docs/source/quicktour.rst", "repo_id": "tokenizers", "token_count": 8904 }
235
<div align="center"> <h1><code>wasm-pack-template</code></h1> <strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong> <p> <a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/travis/rustwasm/wasm-pack-template.svg?style=flat-square" alt="Build Status" /></a> </p> <h3> <a href="https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html">Tutorial</a> <span> | </span> <a href="https://discordapp.com/channels/442252698964721669/443151097398296587">Chat</a> </h3> <sub>Built with 🦀🕸 by <a href="https://rustwasm.github.io/">The Rust and WebAssembly Working Group</a></sub> </div> ## About This is an example project showing off a very basic use case for `wasm` tokenizers usage. [**📚 Read this template tutorial! 📚**][template-docs] This template is designed for compiling Rust libraries into WebAssembly and publishing the resulting package to NPM. Be sure to check out [other `wasm-pack` tutorials online][tutorials] for other templates and usages of `wasm-pack`. [tutorials]: https://rustwasm.github.io/docs/wasm-pack/tutorials/index.html [template-docs]: https://rustwasm.github.io/docs/wasm-pack/tutorials/npm-browser-packages/index.html ## 🚴 Usage ### 🐑 Use `cargo generate` to Clone this Template [Learn more about `cargo generate` here.](https://github.com/ashleygwilliams/cargo-generate) ``` cargo generate --git https://github.com/rustwasm/wasm-pack-template.git --name my-project cd my-project ``` ### 🛠️ Build with `wasm-pack build` ``` wasm-pack build ``` ### 🔬 Test in Headless Browsers with `wasm-pack test` ``` wasm-pack test --headless --firefox ``` ### 🎁 Publish to NPM with `wasm-pack publish` ``` wasm-pack publish ``` ## 🔋 Batteries Included * [`wasm-bindgen`](https://github.com/rustwasm/wasm-bindgen) for communicating between WebAssembly and JavaScript. * [`console_error_panic_hook`](https://github.com/rustwasm/console_error_panic_hook) for logging panic messages to the developer console. * [`wee_alloc`](https://github.com/rustwasm/wee_alloc), an allocator optimized for small code size.
tokenizers/tokenizers/examples/unstable_wasm/README.md/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/README.md", "repo_id": "tokenizers", "token_count": 811 }
236
stable
tokenizers/tokenizers/rust-toolchain/0
{ "file_path": "tokenizers/tokenizers/rust-toolchain", "repo_id": "tokenizers", "token_count": 2 }
237
use rand::distributions::WeightedIndex; use rand::prelude::*; use std::cell::RefCell; use std::cmp::{min, Ordering}; use std::collections::BinaryHeap; use std::rc::Rc; type NodeRef = Rc<RefCell<Node>>; type HypothesisRef = Rc<RefCell<Hypothesis>>; type Agenda = BinaryHeap<Hypothesis>; struct Hypothesis { node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64, } impl Hypothesis { pub fn new(node_ref: NodeRef, next: Option<HypothesisRef>, fx: f64, gx: f64) -> Self { Self { node_ref, next, fx, gx, } } } impl PartialEq for Hypothesis { fn eq(&self, other: &Self) -> bool { self.fx == other.fx } } impl Eq for Hypothesis {} impl PartialOrd for Hypothesis { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } // TODO Maybe use Ordered Floats (https://docs.rs/ordered-float/1.0.2/ordered_float/) impl Ord for Hypothesis { fn cmp(&self, other: &Self) -> Ordering { if self.fx < other.fx { Ordering::Less } else { Ordering::Greater } } } /// Structure to implement Viterbi algorithm to find the best encoding, or sample /// from all possible encodings of a given sentence. #[derive(Debug)] pub struct Lattice<'a> { pub(super) sentence: &'a str, len: usize, nodes: Vec<NodeRef>, pub(super) begin_nodes: Vec<Vec<NodeRef>>, pub(super) end_nodes: Vec<Vec<NodeRef>>, _bos_id: usize, _eos_id: usize, } impl std::fmt::Display for Lattice<'_> { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let display_pieces = |nodes: &Vec<Vec<NodeRef>>| { nodes .iter() .map(|l| { l.iter() .map(|n| self.piece(&n.borrow())) .collect::<Vec<_>>() }) .collect::<Vec<_>>() }; f.debug_struct("Lattice") .field("sentence", &self.sentence) .field("begin_nodes", &display_pieces(&self.begin_nodes)) .field("end_nodes", &display_pieces(&self.end_nodes)) .finish() } } /// A node from the lattice, that helps reconstruct the underlying `String` #[derive(Debug, Clone)] pub struct Node { // Vocabulary id pub(super) id: usize, // Local lattice identifier pub(super) node_id: usize, pos: usize, length: usize, prev: Option<NodeRef>, backtrace_score: f64, score: f64, } impl PartialEq for Node { fn eq(&self, other: &Node) -> bool { self.id == other.id } } impl Node { pub fn new(id: usize, node_id: usize, pos: usize, length: usize, score: f64) -> Self { Self { id, node_id, pos, length, prev: None, score, backtrace_score: 0.0, } } } /// Returns log(exp(x) + exp(y)). /// if init_mode is true, returns log(exp(y)) == y. 
/// log(\sum_i exp(a[i])) can be computed as /// for (int i = 0; i < a.size(); ++i) /// x = LogSumExp(x, a[i], i == 0); fn log_sum_exp(x: f64, y: f64, init_mode: bool) -> f64 { if init_mode { y } else { let (vmin, vmax) = if x > y { (y, x) } else { (x, y) }; let k_minus_log_epsilon = 50.0; if vmax > vmin + k_minus_log_epsilon { vmax } else { vmax + ((vmin - vmax).exp() + 1.0).ln() } } } impl<'a> Lattice<'a> { pub fn from(sentence: &'a str, bos_id: usize, eos_id: usize) -> Self { let len = sentence.len(); let k_reserved_node_size = 16; // We are adding 2 tokens, bos and eos let mut nodes: Vec<NodeRef> = Vec::with_capacity(k_reserved_node_size); let mut begin_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let mut end_nodes = vec![Vec::with_capacity(k_reserved_node_size); len + 1]; let bos = Rc::new(RefCell::new(Node::new(bos_id, 0, 0, 0, 0.0))); let eos = Rc::new(RefCell::new(Node::new(eos_id, 1, len, 0, 0.0))); begin_nodes[len].push(Rc::clone(&eos)); end_nodes[0].push(Rc::clone(&bos)); nodes.push(bos); nodes.push(eos); Self { sentence, len, nodes, begin_nodes, end_nodes, _bos_id: bos_id, _eos_id: eos_id, } } pub fn insert(&mut self, pos: usize, length: usize, score: f64, id: usize) { let node_id = self.nodes.len(); let node = Rc::new(RefCell::new(Node::new(id, node_id, pos, length, score))); self.begin_nodes[pos].push(Rc::clone(&node)); self.end_nodes[pos + length].push(Rc::clone(&node)); self.nodes.push(node); } pub fn viterbi(&mut self) -> Vec<NodeRef> { let len = self.len; let mut pos = 0; while pos <= len { if self.begin_nodes[pos].is_empty() { return vec![]; } for rnode in &self.begin_nodes[pos] { rnode.borrow_mut().prev = None; let mut best_score = 0.0; let mut best_node: Option<NodeRef> = None; for lnode in &self.end_nodes[pos] { let score = lnode.borrow().backtrace_score + rnode.borrow().score; if best_node.is_none() || score > best_score { // TODO can we remove this clone ? 
best_node = Some(lnode.clone()); best_score = score } } match best_node { Some(bnode) => { rnode.borrow_mut().prev = Some(Rc::clone(&bnode)); rnode.borrow_mut().backtrace_score = best_score; } None => return vec![], } } if let Some(c) = self.sentence[pos..].chars().next() { pos += c.len_utf8(); } else { break; } } let mut results: Vec<NodeRef> = vec![]; let root = self.begin_nodes[len][0].borrow(); let prev = root.prev.as_ref(); if prev.is_none() { return vec![]; } let mut node: NodeRef = prev.unwrap().clone(); while node.borrow().prev.is_some() { results.push(node.clone()); let n = node.borrow().clone(); node = n.prev.as_ref().unwrap().clone(); } results.reverse(); results } pub fn piece(&self, node: &Node) -> String { self.sentence[node.pos..node.pos + node.length].to_owned() } pub fn tokens(&mut self) -> Vec<String> { self.viterbi() .iter() .map(|node| self.piece(&node.borrow())) .collect() } pub fn nbest(&mut self, n: usize) -> Vec<Vec<NodeRef>> { match n { 0 => vec![], 1 => vec![self.viterbi()], _ => { // let k_reserved_hypothesis_size = 512; let mut agenda: Agenda = BinaryHeap::new(); let mut hypotheses: Vec<Vec<NodeRef>> = vec![]; let eos = self.eos_node(); let score = eos.borrow().score; let hypo = Hypothesis::new(eos, None, score, score); agenda.push(hypo); // Fill backtrace scores self.viterbi(); while !agenda.is_empty() { let top = Rc::new(RefCell::new(agenda.pop().unwrap())); let node = Rc::clone(&top.borrow().node_ref); if node.borrow().id == self.bos_node().borrow().id { let mut hypothesis = vec![]; let mut next: HypothesisRef = Rc::clone(top.borrow().next.as_ref().unwrap()); while next.borrow().next.is_some() { hypothesis.push(next.borrow().node_ref.clone()); let c: HypothesisRef = next.clone(); // let c: Ref<Hypothesis> = next.clone().borrow(); next = Rc::clone(c.borrow().next.as_ref().unwrap()); } hypotheses.push(hypothesis); if hypotheses.len() == n { return hypotheses; } } else { for lnode in &self.end_nodes[node.borrow().pos] { let top_gx = top.borrow().gx; let fx = lnode.borrow().backtrace_score + top_gx; let gx = lnode.borrow().score + top_gx; let hyp = Hypothesis::new(Rc::clone(lnode), Some(Rc::clone(&top)), fx, gx); agenda.push(hyp); } // When the input is too long or contains duplicated phrases, // `agenda` will get extremely big. Here we avoid this case by // dynamically shrinking the agenda. 
let k_max_agenda_size = 100_000; let k_min_agenda_size = 512; if agenda.len() > k_max_agenda_size { let mut new_agenda = BinaryHeap::new(); let len = min(k_min_agenda_size, n * 10); for _i in 0..len { new_agenda.push(agenda.pop().unwrap()); } agenda = new_agenda; } } } hypotheses } } } pub fn nbest_tokens(&mut self, n: usize) -> Vec<Vec<String>> { self.nbest(n) .iter() .map(|v| v.iter().map(|node| self.piece(&node.borrow())).collect()) .collect() } pub fn len(&self) -> usize { self.len } pub fn is_empty(&self) -> bool { self.len == 0 } pub fn bos_node(&self) -> NodeRef { Rc::clone(&self.end_nodes[0][0]) } pub fn eos_node(&self) -> NodeRef { Rc::clone(&self.begin_nodes[self.len][0]) } pub fn surface(&self, n: usize) -> &str { match self.sentence.char_indices().nth(n) { Some((pos, _)) => &self.sentence[pos..], None => "", } } pub fn sentence(&self) -> &str { self.sentence } pub fn populate_marginal(&self, freq: f64, expected: &mut [f64]) -> f64 { let len = self.len(); let n_nodes = self.nodes.len(); let mut alpha = vec![0.0; n_nodes]; let mut beta = vec![0.0; n_nodes]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], lnode.borrow().score + alpha[lid], *lnode == self.end_nodes[pos][0], ); } } } for pos in (0..=len).rev() { // let rpos = len - pos; for lnode in &self.end_nodes[pos] { for rnode in &self.begin_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; beta[lid] = log_sum_exp( beta[lid], rnode.borrow().score + beta[rid], *rnode == self.begin_nodes[pos][0], ); } } } let eos_id = self.begin_nodes[len][0].borrow().node_id; let z = alpha[eos_id]; for pos in 0..len { for node in &self.begin_nodes[pos] { let node_id = node.borrow().node_id; let id = node.borrow().id; let a = alpha[node_id]; let b = beta[node_id]; let total = a + node.borrow().score + b - z; let update = freq * total.exp(); expected[id] += update; } } freq * z } pub fn sample(&self, theta: f64) -> Vec<NodeRef> { let len = self.len(); if len == 0 { return vec![]; } let mut alpha = vec![0.0; self.nodes.len()]; for pos in 0..=len { for rnode in &self.begin_nodes[pos] { for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; let rid = rnode.borrow().node_id; alpha[rid] = log_sum_exp( alpha[rid], theta * (lnode.borrow().score + alpha[lid]), *lnode == self.end_nodes[pos][0], ); } } } let mut rng = thread_rng(); let mut results: Vec<NodeRef> = vec![]; let mut probs: Vec<f64> = vec![]; let mut z = alpha[self.eos_node().borrow().node_id]; let mut node = self.eos_node(); loop { probs.clear(); let pos = node.borrow().pos; for lnode in &self.end_nodes[pos] { let lid = lnode.borrow().node_id; probs.push((alpha[lid] + theta * lnode.borrow().score - z).exp()) } let dist = WeightedIndex::new(&probs).unwrap(); let index = dist.sample(&mut rng); node = Rc::clone(&self.end_nodes[pos][index]); if node == self.bos_node() { break; } z = alpha[node.borrow().node_id]; results.push(Rc::clone(&node)); } results.reverse(); results } pub fn sample_token(&self, theta: f64) -> Vec<String> { self.sample(theta) .iter() .map(|node| self.piece(&node.borrow())) .collect() } } #[cfg(test)] mod tests { use super::*; use assert_approx_eq::assert_approx_eq; #[test] fn set_sentence() { let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); let lattice = Lattice::from("", 1, 2); assert_eq!(lattice.len(), 0); assert_eq!(lattice.sentence(), ""); 
assert_eq!(lattice.surface(0), ""); let lattice = Lattice::from("test", 1, 2); assert_eq!(lattice.len(), 4); assert_eq!(lattice.sentence(), "test"); assert_eq!(lattice.surface(0), "test"); assert_eq!(lattice.surface(1), "est"); assert_eq!(lattice.surface(2), "st"); assert_eq!(lattice.surface(3), "t"); let bos = lattice.bos_node(); let eos = lattice.eos_node(); assert_eq!(bos.borrow().id, 1); assert_eq!(eos.borrow().id, 2); assert_eq!( lattice.end_nodes[0].first().unwrap().borrow().id, bos.borrow().id ); assert_eq!( lattice.begin_nodes[4].first().unwrap().borrow().id, eos.borrow().id ); let lattice = Lattice::from("テストab", 1, 2); assert_eq!(lattice.len(), 11); assert_eq!(lattice.sentence(), "テストab"); assert_eq!(lattice.surface(0), "テストab"); assert_eq!(lattice.surface(1), "ストab"); assert_eq!(lattice.surface(2), "トab"); assert_eq!(lattice.surface(3), "ab"); assert_eq!(lattice.surface(4), "b"); } #[test] fn insert_test() { let mut lattice = Lattice::from("ABあい", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 3, 0.0, 5); lattice.insert(5, 3, 0.0, 6); lattice.insert(0, 2, 0.0, 7); lattice.insert(1, 4, 0.0, 8); lattice.insert(2, 6, 0.0, 9); // 0 & 1 are bos and eos let node0 = lattice.nodes[2].borrow(); let node1 = lattice.nodes[3].borrow(); let node2 = lattice.nodes[4].borrow(); let node3 = lattice.nodes[5].borrow(); let node4 = lattice.nodes[6].borrow(); let node5 = lattice.nodes[7].borrow(); let node6 = lattice.nodes[8].borrow(); assert_eq!(lattice.piece(&node0), "A"); assert_eq!(lattice.piece(&node1), "B"); assert_eq!(lattice.piece(&node2), "あ"); assert_eq!(lattice.piece(&node3), "い"); assert_eq!(lattice.piece(&node4), "AB"); assert_eq!(lattice.piece(&node5), "Bあ"); assert_eq!(lattice.piece(&node6), "あい"); assert_eq!(node0.pos, 0); assert_eq!(node1.pos, 1); assert_eq!(node2.pos, 2); assert_eq!(node3.pos, 5); assert_eq!(node4.pos, 0); assert_eq!(node5.pos, 1); assert_eq!(node6.pos, 2); assert_eq!(node0.length, 1); assert_eq!(node1.length, 1); assert_eq!(node2.length, 3); assert_eq!(node3.length, 3); assert_eq!(node4.length, 2); assert_eq!(node5.length, 4); assert_eq!(node6.length, 6); assert_eq!(lattice.bos_node().borrow().id, 1); assert_eq!(lattice.eos_node().borrow().id, 2); assert_eq!(node0.id, 3); assert_eq!(node1.id, 4); assert_eq!(node2.id, 5); assert_eq!(node3.id, 6); assert_eq!(node4.id, 7); assert_eq!(node5.id, 8); assert_eq!(node6.id, 9); assert_eq!(lattice.begin_nodes[0].len(), 2); assert_eq!(lattice.begin_nodes[1].len(), 2); assert_eq!(lattice.begin_nodes[2].len(), 2); assert_eq!(lattice.begin_nodes[5].len(), 1); assert_eq!(lattice.begin_nodes[8].len(), 1); assert_eq!(lattice.end_nodes[0].len(), 1); assert_eq!(lattice.end_nodes[1].len(), 1); assert_eq!(lattice.end_nodes[2].len(), 2); assert_eq!(lattice.end_nodes[5].len(), 2); assert_eq!(lattice.end_nodes[8].len(), 2); assert_eq!(lattice.begin_nodes[0][0].borrow().id, node0.id); assert_eq!(lattice.begin_nodes[0][1].borrow().id, node4.id); assert_eq!(lattice.begin_nodes[1][0].borrow().id, node1.id); assert_eq!(lattice.begin_nodes[1][1].borrow().id, node5.id); assert_eq!(lattice.begin_nodes[2][0].borrow().id, node2.id); assert_eq!(lattice.begin_nodes[2][1].borrow().id, node6.id); assert_eq!(lattice.begin_nodes[5][0].borrow().id, node3.id); assert_eq!( lattice.eos_node().borrow().id, lattice.begin_nodes[8][0].borrow().id ); assert_eq!( lattice.bos_node().borrow().id, lattice.end_nodes[0][0].borrow().id ); assert_eq!(node0.id, lattice.end_nodes[1][0].borrow().id); assert_eq!(node1.id, 
lattice.end_nodes[2][0].borrow().id); assert_eq!(node4.id, lattice.end_nodes[2][1].borrow().id); assert_eq!(node2.id, lattice.end_nodes[5][0].borrow().id); assert_eq!(node5.id, lattice.end_nodes[5][1].borrow().id); assert_eq!(node3.id, lattice.end_nodes[8][0].borrow().id); assert_eq!(node6.id, lattice.end_nodes[8][1].borrow().id); } #[test] fn test_viterbi() { let mut lattice = Lattice::from("ABC", 1, 2); assert_eq!(lattice.viterbi(), vec![]); // Still incomplete lattice.insert(0, 1, 0.0, 3); assert_eq!(lattice.viterbi(), vec![]); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); // XXX: In sentence piece this is not tested, still incomplete ? assert_eq!(lattice.viterbi().len(), 3); } #[test] fn test_viterbi2() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); assert_eq!(lattice.tokens(), ["A", "B", "C"]); lattice.insert(0, 2, 2.0, 6); assert_eq!(lattice.tokens(), ["AB", "C"]); lattice.insert(1, 2, 5.0, 7); assert_eq!(lattice.tokens(), ["A", "BC"]); lattice.insert(0, 3, 10.0, 8); assert_eq!(lattice.tokens(), ["ABC"]); } #[test] fn test_nbest() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 0.0, 3); lattice.insert(1, 1, 0.0, 4); lattice.insert(2, 1, 0.0, 5); lattice.insert(0, 2, 2.0, 6); lattice.insert(1, 2, 5.0, 7); lattice.insert(0, 3, 10.0, 8); let nbests = lattice.nbest_tokens(10); assert_eq!( nbests, vec![ vec!["ABC"], vec!["A", "BC"], vec!["AB", "C"], vec!["A", "B", "C"] ] ); assert!(lattice.nbest_tokens(0).is_empty()); assert_eq!(lattice.nbest_tokens(1), vec![vec!["ABC"]]); } #[test] fn test_log_sum_exp() { let mut x = 0.0; let v: Vec<f64> = vec![1.0, 2.0, 3.0]; for (i, y) in v.iter().enumerate() { x = log_sum_exp(x, *y, i == 0); } assert_approx_eq!(x, v.iter().map(|n| n.exp()).sum::<f64>().ln(), 0.001); } #[test] fn test_populate() { let mut lattice = Lattice::from("ABC", 1, 2); lattice.insert(0, 1, 1.0, 3); // A lattice.insert(1, 1, 1.2, 4); // B lattice.insert(2, 1, 2.5, 5); // C lattice.insert(0, 2, 3.0, 6); // AB lattice.insert(1, 2, 4.0, 7); // BC lattice.insert(0, 3, 2.0, 8); // ABC let mut probs = vec![0.0; 9]; let p1 = (1.0_f64 + 1.2 + 2.5).exp(); let p2 = (3.0_f64 + 2.5).exp(); let p3 = (1.0_f64 + 4.0).exp(); let p4 = 2.0_f64.exp(); let z = p1 + p2 + p3 + p4; let log_z = lattice.populate_marginal(1.0, &mut probs); assert_approx_eq!(log_z, z.ln(), 0.001); assert_approx_eq!(probs[0], 0.0, 0.001); assert_approx_eq!(probs[1], 0.0, 0.001); assert_approx_eq!(probs[2], 0.0, 0.001); assert_approx_eq!(probs[3], (p1 + p3) / z, 0.001); assert_approx_eq!(probs[4], (p1) / z, 0.001); assert_approx_eq!(probs[5], (p1 + p2) / z, 0.001); assert_approx_eq!(probs[6], (p2) / z, 0.001); assert_approx_eq!(probs[7], (p3) / z, 0.001); assert_approx_eq!(probs[8], (p4) / z, 0.001); } }
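For intuition, the `log_sum_exp` helper above is the standard numerically stable way to accumulate log-probabilities, and it is what the marginal and sampling passes rely on. A rough Python transcription of the same logic, for illustration only (not part of the crate):

```python
import math

def log_sum_exp(x: float, y: float, init_mode: bool) -> float:
    # Returns log(exp(x) + exp(y)); when init_mode is True it simply returns y,
    # mirroring the Rust helper above.
    if init_mode:
        return y
    vmin, vmax = (y, x) if x > y else (x, y)
    if vmax > vmin + 50.0:  # k_minus_log_epsilon
        return vmax
    return vmax + math.log(math.exp(vmin - vmax) + 1.0)

# log(sum_i exp(a[i])) accumulated incrementally, as in the doc comment
a = [1.0, 2.0, 3.0]
acc = 0.0
for i, v in enumerate(a):
    acc = log_sum_exp(acc, v, i == 0)
assert abs(acc - math.log(sum(math.exp(v) for v in a))) < 1e-6
```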
tokenizers/tokenizers/src/models/unigram/lattice.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/lattice.rs", "repo_id": "tokenizers", "token_count": 12682 }
238
use crate::tokenizer::pattern::Pattern; use crate::tokenizer::Decoder; use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::SysRegex; use serde::{Deserialize, Serialize}; /// Represents the different patterns that `Replace` can use #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub enum ReplacePattern { String(String), Regex(String), } impl From<String> for ReplacePattern { fn from(v: String) -> Self { Self::String(v) } } impl From<&str> for ReplacePattern { fn from(v: &str) -> Self { Self::String(v.to_owned()) } } /// We use this custom deserializer to provide the value for `regex` for `Replace` #[doc(hidden)] #[derive(Deserialize)] #[serde(tag = "type")] struct ReplaceDeserializer { pattern: ReplacePattern, content: String, } impl std::convert::TryFrom<ReplaceDeserializer> for Replace { type Error = Box<dyn std::error::Error + Send + Sync>; fn try_from(v: ReplaceDeserializer) -> Result<Self> { Self::new(v.pattern, v.content) } } /// This normalizer will take a `pattern` (for now only a String) /// and replace every occurrence with `content`. #[derive(Debug, Serialize, Deserialize)] #[serde(tag = "type", try_from = "ReplaceDeserializer")] pub struct Replace { pattern: ReplacePattern, content: String, #[serde(skip)] regex: SysRegex, } impl Clone for Replace { fn clone(&self) -> Self { Self::new(self.pattern.clone(), &self.content).unwrap() } } impl PartialEq for Replace { fn eq(&self, other: &Self) -> bool { self.pattern == other.pattern && self.content == other.content } } impl Replace { pub fn new<I: Into<ReplacePattern>, C: Into<String>>(pattern: I, content: C) -> Result<Self> { let pattern: ReplacePattern = pattern.into(); let regex = match &pattern { ReplacePattern::String(s) => SysRegex::new(&regex::escape(s))?, ReplacePattern::Regex(r) => SysRegex::new(r)?, }; Ok(Self { pattern, content: content.into(), regex, }) } } impl Normalizer for Replace { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.replace(&self.regex, &self.content) } } impl Decoder for Replace { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { tokens .into_iter() .map(|token| -> Result<String> { let mut new_token = "".to_string(); for ((start, stop), is_match) in (&self.regex).find_matches(&token)? 
{ if is_match { new_token.push_str(&self.content); } else { new_token.push_str(&token[start..stop]); } } Ok(new_token) }) .collect() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_replace() { let original = "This is a ''test''"; let normalized = "This is a \"test\""; let mut n = NormalizedString::from(original); Replace::new("''", "\"").unwrap().normalize(&mut n).unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn test_replace_regex() { let original = "This is a test"; let normalized = "This is a test"; let mut n = NormalizedString::from(original); Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ') .unwrap() .normalize(&mut n) .unwrap(); assert_eq!(&n.get(), &normalized); } #[test] fn serialization() { let replace = Replace::new("Hello", "Hey").unwrap(); let replace_s = r#"{"type":"Replace","pattern":{"String":"Hello"},"content":"Hey"}"#; assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s); assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace); let replace = Replace::new(ReplacePattern::Regex(r"\s+".into()), ' ').unwrap(); let replace_s = r#"{"type":"Replace","pattern":{"Regex":"\\s+"},"content":" "}"#; assert_eq!(serde_json::to_string(&replace).unwrap(), replace_s); assert_eq!(serde_json::from_str::<Replace>(replace_s).unwrap(), replace); } #[test] fn test_replace_decode() { let original = vec!["hello".to_string(), "_hello".to_string()]; let replace = Replace::new("_", " ").unwrap(); assert_eq!( replace.decode_chain(original).unwrap(), vec!["hello", " hello"] ); } }
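In the Python bindings this normalizer is exposed under `tokenizers.normalizers`; a minimal usage sketch (assuming a reasonably recent `tokenizers` release where `normalize_str` is available):

```python
from tokenizers.normalizers import Replace

normalizer = Replace("''", '"')
print(normalizer.normalize_str("This is a ''test''"))  # This is a "test"
```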
tokenizers/tokenizers/src/normalizers/replace.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/replace.rs", "repo_id": "tokenizers", "token_count": 2048 }
239
use regex::Regex; use crate::tokenizer::{ pattern::Invert, PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior, }; use crate::utils::macro_rules_attribute; #[derive(Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Whitespace; impl Default for Whitespace { fn default() -> Self { Self } } impl PreTokenizer for Whitespace { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { lazy_static! { static ref RE: Regex = Regex::new(r"\w+|[^\w\s]+").unwrap(); } let re_ref: &Regex = &RE; pretokenized.split(|_, normalized| { normalized.split(Invert(re_ref), SplitDelimiterBehavior::Removed) }) } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct WhitespaceSplit; impl PreTokenizer for WhitespaceSplit { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, normalized| { normalized.split(char::is_whitespace, SplitDelimiterBehavior::Removed) }) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType, PreTokenizer}; #[test] fn basic() { let tests = vec![ ( "Hey man!", vec![("Hey", (0, 3)), ("man", (4, 7)), ("!", (7, 8))], ), ( "How are you doing?", vec![ ("How", (0, 3)), ("are", (4, 7)), ("you", (8, 11)), ("doing", (12, 17)), ("?", (17, 18)), ], ), ("\n", vec![]), ]; let pretok = Whitespace {}; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } #[test] fn whitespace_split() { let tests = vec![ ("Hey man!", vec![("Hey", (0, 3)), ("man!", (4, 8))]), ( "Hey, man, Good?", vec![("Hey,", (0, 4)), ("man,", (5, 9)), ("Good?", (10, 15))], ), ]; let pretok = WhitespaceSplit; for (s, res) in tests { let mut pretokenized = PreTokenizedString::from(s); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), res ); } } }
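The same pre-tokenizer is available from the Python bindings; a small sketch mirroring the unit test above (assuming a recent `tokenizers` release exposing `pre_tokenize_str`):

```python
from tokenizers.pre_tokenizers import Whitespace

pre_tokenizer = Whitespace()
print(pre_tokenizer.pre_tokenize_str("Hey man!"))
# [('Hey', (0, 3)), ('man', (4, 7)), ('!', (7, 8))]
```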
tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/whitespace.rs", "repo_id": "tokenizers", "token_count": 1660 }
240
//! This comes from the Rust libcore and is duplicated here because it is not exported //! (cf <https://github.com/rust-lang/rust/blob/25091ed9b7739e12466fb2490baa1e8a2815121c/src/libcore/iter/adapters/mod.rs#L2664>) //! We are now using the version from <https://stackoverflow.com/questions/44544323/how-to-unzip-a-sequence-of-resulta-b-e-to-a-veca-vecb-and-stop-on-f> //! because the one from the libcore seems to cause overflowing stacks in some cases //! It also contains a lines_with_ending that copies std::io::BufRead but keeps line endings. use std::io::BufRead; pub struct ResultShunt<I, E> { iter: I, error: Option<E>, } impl<I, T, E> ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { /// Process the given iterator as if it yielded a `T` instead of a /// `Result<T, _>`. Any errors will stop the inner iterator and /// the overall result will be an error. pub fn process<F, U>(iter: I, mut f: F) -> Result<U, E> where F: FnMut(&mut Self) -> U, { let mut shunt = ResultShunt::new(iter); let value = f(shunt.by_ref()); shunt.reconstruct(value) } fn new(iter: I) -> Self { ResultShunt { iter, error: None } } /// Consume the adapter and rebuild a `Result` value. This should /// *always* be called, otherwise any potential error would be /// lost. fn reconstruct<U>(self, val: U) -> Result<U, E> { match self.error { None => Ok(val), Some(e) => Err(e), } } } impl<I, T, E> Iterator for ResultShunt<I, E> where I: Iterator<Item = Result<T, E>>, { type Item = T; fn next(&mut self) -> Option<Self::Item> { match self.iter.next() { Some(Ok(v)) => Some(v), Some(Err(e)) => { self.error = Some(e); None } None => None, } } } /// Copied from std::io::BufRead but keep newline characters. #[derive(Debug)] pub struct Lines<B> { buf: B, } pub trait LinesWithEnding<B> { fn lines_with_ending(self) -> Lines<B>; } impl<B> LinesWithEnding<B> for B where B: BufRead, { fn lines_with_ending(self) -> Lines<B> { Lines::<B> { buf: self } } } impl<B: BufRead> Iterator for Lines<B> { type Item = std::io::Result<String>; fn next(&mut self) -> Option<Self::Item> { let mut buf = String::new(); match self.buf.read_line(&mut buf) { Ok(0) => None, Ok(_n) => { // if buf.ends_with('\n') { // buf.pop(); // if buf.ends_with('\r') { // buf.pop(); // } // } Some(Ok(buf)) } Err(e) => Some(Err(e)), } } }
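For intuition, the `lines_with_ending` behavior matches what iterating a Python text buffer does by default — each yielded line keeps its newline characters — whereas `BufRead::lines` strips them. A tiny illustration:

```python
import io

buf = io.StringIO("first\nsecond\r\nlast")
print(list(buf))  # ['first\n', 'second\r\n', 'last']
```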
tokenizers/tokenizers/src/utils/iter.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/iter.rs", "repo_id": "tokenizers", "token_count": 1339 }
241
version: 2.1 setup: true orbs: continuation: circleci/continuation@0.1.0 parameters: nightly: type: boolean default: false jobs: # Ensure running with CircleCI/huggingface check_circleci_user: docker: - image: cimg/python:3.8.12 parallelism: 1 steps: - run: echo $CIRCLE_PROJECT_USERNAME - run: | if [ "$CIRCLE_PROJECT_USERNAME" = "huggingface" ]; then exit 0 else echo "The CI is running under $CIRCLE_PROJECT_USERNAME personal account. Please follow https://support.circleci.com/hc/en-us/articles/360008097173-Troubleshooting-why-pull-requests-are-not-triggering-jobs-on-my-organization- to fix it."; exit -1 fi # Fetch the tests to run fetch_tests: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 parallelism: 1 steps: - checkout - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager GitPython - run: pip install -U --upgrade-strategy eager . - run: mkdir -p test_preparation - run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt - store_artifacts: path: ~/transformers/tests_fetched_summary.txt - run: | if [ -f test_list.txt ]; then cp test_list.txt test_preparation/test_list.txt else touch test_preparation/test_list.txt fi - run: | if [ -f examples_test_list.txt ]; then mv examples_test_list.txt test_preparation/examples_test_list.txt else touch test_preparation/examples_test_list.txt fi - run: | if [ -f filtered_test_list_cross_tests.txt ]; then mv filtered_test_list_cross_tests.txt test_preparation/filtered_test_list_cross_tests.txt else touch test_preparation/filtered_test_list_cross_tests.txt fi - run: | if [ -f doctest_list.txt ]; then cp doctest_list.txt test_preparation/doctest_list.txt else touch test_preparation/doctest_list.txt fi - run: | if [ -f test_repo_utils.txt ]; then mv test_repo_utils.txt test_preparation/test_repo_utils.txt else touch test_preparation/test_repo_utils.txt fi - run: python utils/tests_fetcher.py --filter_tests - run: | if [ -f test_list.txt ]; then mv test_list.txt test_preparation/filtered_test_list.txt else touch test_preparation/filtered_test_list.txt fi - store_artifacts: path: test_preparation/test_list.txt - store_artifacts: path: test_preparation/doctest_list.txt - store_artifacts: path: ~/transformers/test_preparation/filtered_test_list.txt - store_artifacts: path: test_preparation/examples_test_list.txt - run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation - run: | if [ ! -s test_preparation/generated_config.yml ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi - run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt - store_artifacts: path: test_preparation/generated_config.txt - store_artifacts: path: test_preparation/filtered_test_list_cross_tests.txt - continuation/continue: configuration_path: test_preparation/generated_config.yml # To run all tests for the nightly build fetch_all_tests: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 parallelism: 1 steps: - checkout - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager GitPython - run: pip install -U --upgrade-strategy eager . 
- run: | mkdir test_preparation echo -n "tests" > test_preparation/test_list.txt echo -n "all" > test_preparation/examples_test_list.txt echo -n "tests/repo_utils" > test_preparation/test_repo_utils.txt - run: | echo -n "tests" > test_list.txt python utils/tests_fetcher.py --filter_tests mv test_list.txt test_preparation/filtered_test_list.txt - run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation - run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt - store_artifacts: path: test_preparation/generated_config.txt - continuation/continue: configuration_path: test_preparation/generated_config.yml check_code_quality: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 resource_class: large environment: TRANSFORMERS_IS_CI: yes PYTEST_TIMEOUT: 120 parallelism: 1 steps: - checkout - restore_cache: keys: - v0.7-code_quality-pip-{{ checksum "setup.py" }} - v0.7-code-quality-pip - restore_cache: keys: - v0.7-code_quality-site-packages-{{ checksum "setup.py" }} - v0.7-code-quality-site-packages - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager .[all,quality] - save_cache: key: v0.7-code_quality-pip-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - save_cache: key: v0.7-code_quality-site-packages-{{ checksum "setup.py" }} paths: - '~/.pyenv/versions/' - run: name: Show installed libraries and their versions command: pip freeze | tee installed.txt - store_artifacts: path: ~/transformers/installed.txt - run: python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 🚨'; exit 1) - run: ruff check examples tests src utils - run: ruff format tests src utils --check - run: python utils/custom_init_isort.py --check_only - run: python utils/sort_auto_mappings.py --check_only - run: python utils/check_doc_toc.py check_repository_consistency: working_directory: ~/transformers docker: - image: cimg/python:3.8.12 resource_class: large environment: TRANSFORMERS_IS_CI: yes PYTEST_TIMEOUT: 120 parallelism: 1 steps: - checkout - restore_cache: keys: - v0.7-repository_consistency-pip-{{ checksum "setup.py" }} - v0.7-repository_consistency-pip - restore_cache: keys: - v0.7-repository_consistency-site-packages-{{ checksum "setup.py" }} - v0.7-repository_consistency-site-packages - run: pip install --upgrade --upgrade-strategy eager pip - run: pip install -U --upgrade-strategy eager .[all,quality] - save_cache: key: v0.7-repository_consistency-pip-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - save_cache: key: v0.7-repository_consistency-site-packages-{{ checksum "setup.py" }} paths: - '~/.pyenv/versions/' - run: name: Show installed libraries and their versions command: pip freeze | tee installed.txt - store_artifacts: path: ~/transformers/installed.txt - run: python utils/check_copies.py - run: python utils/check_table.py - run: python utils/check_dummies.py - run: python utils/check_repo.py - run: python utils/check_inits.py - run: python utils/check_config_docstrings.py - run: python utils/check_config_attributes.py - run: python utils/check_doctest_list.py - run: make deps_table_check_updated - run: python utils/update_metadata.py --check-only - run: python utils/check_task_guides.py - run: python utils/check_docstrings.py - run: python utils/check_support_list.py workflows: version: 2 setup_and_quality: when: not: <<pipeline.parameters.nightly>> jobs: - check_circleci_user - check_code_quality - check_repository_consistency 
- fetch_tests nightly: when: <<pipeline.parameters.nightly>> jobs: - check_circleci_user - check_code_quality - check_repository_consistency - fetch_all_tests
transformers/.circleci/config.yml/0
{ "file_path": "transformers/.circleci/config.yml", "repo_id": "transformers", "token_count": 5249 }
242
FROM rocm/dev-ubuntu-20.04:5.6 # rocm/pytorch has no version with 2.1.0 LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive ARG PYTORCH='2.1.0' ARG TORCH_VISION='0.16.0' ARG TORCH_AUDIO='2.1.0' ARG ROCM='5.6' RUN apt update && \ apt install -y --no-install-recommends git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-dev python3-pip ffmpeg && \ apt clean && \ rm -rf /var/lib/apt/lists/* RUN python3 -m pip install --no-cache-dir --upgrade pip RUN python3 -m pip install torch==$PYTORCH torchvision==$TORCH_VISION torchaudio==$TORCH_AUDIO --index-url https://download.pytorch.org/whl/rocm$ROCM RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools ninja git+https://github.com/facebookresearch/detectron2.git pytesseract "itsdangerous<2.1.0" ARG REF=main WORKDIR / # Invalidate docker cache from here if new commit is available. ADD https://api.github.com/repos/huggingface/transformers/git/refs/heads/main version.json RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video] RUN python3 -m pip uninstall -y tensorflow flax # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop # Remove nvml as it is not compatible with ROCm RUN python3 -m pip uninstall py3nvml pynvml -y
transformers/docker/transformers-pytorch-amd-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-amd-gpu/Dockerfile", "repo_id": "transformers", "token_count": 551 }
243
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Distributed training with 🤗 Accelerate

As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and for speeding up training by several orders of magnitude. At Hugging Face, we created the [🤗 Accelerate](https://huggingface.co/docs/accelerate) library to help users train a 🤗 Transformers model on any kind of distributed setup, whether it is multiple GPUs on one machine or multiple GPUs across several machines. In this tutorial, you will learn how to adapt your native PyTorch training loop to enable training in a distributed environment.

## Setup

Get started by installing 🤗 Accelerate:

```bash
pip install accelerate
```

Then import and create an [`~accelerate.Accelerator`] object. The [`~accelerate.Accelerator`] automatically detects your type of distributed setup and initializes all the necessary components for training. You don't need to explicitly place your model on a device.

```py
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
```

## Prepare to accelerate

The next step is to pass all the relevant training objects to the [`~accelerate.Accelerator.prepare`] method. This includes your training and evaluation DataLoaders, a model, and an optimizer:

```py
>>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
...     train_dataloader, eval_dataloader, model, optimizer
... )
```

## Backward

The last addition is to replace the typical `loss.backward()` in your training loop with 🤗 Accelerate's [`~accelerate.Accelerator.backward`] method:

```py
>>> for epoch in range(num_epochs):
...     for batch in train_dataloader:
...         outputs = model(**batch)
...         loss = outputs.loss
...         accelerator.backward(loss)
...         optimizer.step()
...         lr_scheduler.step()
...         optimizer.zero_grad()
...         progress_bar.update(1)
```

As you can see in the following code, you only need to add four additional lines of code to your training loop to enable distributed training!
```diff
+ from accelerate import Accelerator
  from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler

+ accelerator = Accelerator()

  model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
  optimizer = AdamW(model.parameters(), lr=3e-5)

- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
- model.to(device)

+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
+     train_dataloader, eval_dataloader, model, optimizer
+ )

  num_epochs = 3
  num_training_steps = num_epochs * len(train_dataloader)
  lr_scheduler = get_scheduler(
      "linear",
      optimizer=optimizer,
      num_warmup_steps=0,
      num_training_steps=num_training_steps
  )

  progress_bar = tqdm(range(num_training_steps))

  model.train()
  for epoch in range(num_epochs):
      for batch in train_dataloader:
-         batch = {k: v.to(device) for k, v in batch.items()}
          outputs = model(**batch)
          loss = outputs.loss
-         loss.backward()
+         accelerator.backward(loss)

          optimizer.step()
          lr_scheduler.step()
          optimizer.zero_grad()
          progress_bar.update(1)
```

## Train

Once you've added the relevant lines of code, launch your training in a script or a notebook like Colaboratory.

### Train with a script

If you are running your training from a script, run the following command to create and save a configuration file:

```bash
accelerate config
```

Then launch your training with:

```bash
accelerate launch train.py
```

### Train with a notebook

🤗 Accelerate can also run in a notebook if you're planning on using Colaboratory's TPUs. Wrap all the code responsible for training in a function, and pass it to [`~accelerate.notebook_launcher`]:

```py
>>> from accelerate import notebook_launcher

>>> notebook_launcher(training_function)
```

For more information about 🤗 Accelerate and its rich features, refer to the [documentation](https://huggingface.co/docs/accelerate).
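If your machine has more than one GPU, the notebook launcher can also be told explicitly how many processes to start. A small sketch (it assumes the `training_function` defined above; the `num_processes` keyword follows current Accelerate releases and may differ in older versions):

```py
from accelerate import notebook_launcher

# spawn 2 processes, e.g. for a machine with 2 GPUs
notebook_launcher(training_function, num_processes=2)
```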
transformers/docs/source/de/accelerate.md/0
{ "file_path": "transformers/docs/source/de/accelerate.md", "repo_id": "transformers", "token_count": 1929 }
244
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Testing

Let's take a look at how 🤗 Transformers models are tested and how you can write new tests and improve the existing ones.

There are 2 test suites in the repository:

1. `tests` -- tests for the general API
2. `examples` -- tests mostly for various applications that aren't part of the API

## How transformers are tested

1. Once a PR is submitted, it gets tested with 9 CircleCi jobs. Every new commit to that PR is tested again. These jobs
   are defined in this [config file](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml), so that if needed you can reproduce the same
   environment on your machine. These CI jobs don't run `@slow` tests.

2. There are 3 jobs run by [github actions](https://github.com/huggingface/transformers/actions):

   - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): checks whether torch hub
     integration works.

   - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): runs fast tests on GPU only on commits on
     `main`. It only runs if a commit on `main` has updated the code in one of the following folders: `src`,
     `tests`, `.github` (to prevent running on added model cards, notebooks, etc.)

   - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): runs normal and slow tests on GPU in
     `tests` and `examples`:

```bash
RUN_SLOW=1 pytest tests/
RUN_SLOW=1 pytest examples/
```

   The results can be observed [here](https://github.com/huggingface/transformers/actions).

## Running tests

### Choosing which tests to run

This document goes into many details of how tests can be run. If after reading everything you still need more details,
you will find them [here](https://docs.pytest.org/en/latest/usage.html).

Here are some of the most useful ways of running tests.

Run all:

```console
pytest
```

or:

```bash
make test
```

Note that the latter is defined as:

```bash
python -m pytest -n auto --dist=loadfile -s -v ./tests/
```

which tells pytest to:

- run as many test processes as there are CPU cores (which could be too many if you don't have a ton of RAM!)
- ensure all tests from the same file will be run by the same test process
- not capture output
- run in verbose mode

### Getting the list of all tests

All tests of the test suite:

```bash
pytest --collect-only -q
```

All tests of a given test file:

```bash
pytest tests/test_optimization.py --collect-only -q
```

### Run a specific test module

To run an individual test module:

```bash
pytest tests/utils/test_logging.py
```

### Run specific tests

Since unittest is used inside most of the tests, to run specific subtests you need to know the name of the unittest
class containing those tests. For example, it could be:

```bash
pytest tests/test_optimization.py::OptimizationTest::test_adam_w
```

Here:

- `tests/test_optimization.py` - the file with tests
- `OptimizationTest` - the name of the class
- `test_adam_w` - the name of the specific test function

If the file contains multiple classes, you can choose to run only the tests of a given class. For example:

```bash
pytest tests/test_optimization.py::OptimizationTest
```

will run all the tests inside that class.

As mentioned earlier, you can see what tests are contained inside the `OptimizationTest` class by running:

```bash
pytest tests/test_optimization.py::OptimizationTest --collect-only -q
```

You can run tests by keyword expressions.

To run only tests whose name contains `adam`:

```bash
pytest -k adam tests/test_optimization.py
```

Logical `and` and `or` can be used to indicate whether all keywords should match or either. `not` can be used to
negate.

To run all tests except those whose name contains `adam`:

```bash
pytest -k "not adam" tests/test_optimization.py
```

And you can combine the two patterns in one:

```bash
pytest -k "ada and not adam" tests/test_optimization.py
```

For example, to run both `test_adafactor` and `test_adam_w`, you can use:

```bash
pytest -k "test_adafactor or test_adam_w" tests/test_optimization.py
```

Note that we use `or` here, since we want either of the keywords to match in order to include both.

If you want to include only tests that match both patterns, `and` is to be used:

```bash
pytest -k "test and ada" tests/test_optimization.py
```

### Run `accelerate` tests

Sometimes you need to run `accelerate` tests on your models. For that you can just add `-m accelerate_tests` to your
command; for example, to run these tests on `OPT`:

```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```

### Run documentation tests

In order to test whether the documentation examples are correct, you should check that the `doctests` are passing.
As an example, let's use [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035)'s docstring:

```python
r"""
Returns:

Example:
    ```python
    >>> import torch
    >>> from transformers import WhisperModel, WhisperFeatureExtractor
    >>> from datasets import load_dataset

    >>> model = WhisperModel.from_pretrained("openai/whisper-base")
    >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
    >>> input_features = inputs.input_features
    >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
    >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
    >>> list(last_hidden_state.shape)
    [1, 2, 512]
    ```"""
```

Just run the following line to automatically test every docstring example in the desired file:

```bash
pytest --doctest-modules <path_to_file_or_dir>
```

If the file has a markdown extension, you should add the `--doctest-glob="*.md"` argument.

### Run only modified tests

You can run the tests related to the unstaged files or the current branch (according to Git) by using
[pytest-picked](https://github.com/anapaulagomes/pytest-picked). This is a great way of quickly testing that your
changes didn't break anything, since it won't run the tests related to files you didn't touch.

```bash
pip install pytest-picked
```

```bash
pytest --picked
```

All tests will be run from files and folders which are modified, but not yet committed.

### Automatically rerun failed tests on source modification

[pytest-xdist](https://github.com/pytest-dev/pytest-xdist) provides a very useful feature of detecting all failed
tests, and then waiting for you to modify files so it can continuously re-run those failing tests until they pass while
you fix them. So you don't need to restart pytest after you made the fix. This is repeated until all tests pass, after
which a full run is performed again.

```bash
pip install pytest-xdist
```

To enter the mode: `pytest -f` or `pytest --looponfail`

File changes are detected by looking at the `looponfailroots` root directories and all of their contents (recursively).
If the default for this value doesn't work for you, you can change it in your project by setting a configuration
option in `setup.cfg`:

```ini
[tool:pytest]
looponfailroots = transformers tests
```

or in `pytest.ini`/`tox.ini` files:

```ini
[pytest]
looponfailroots = transformers tests
```

This would lead to only looking for file changes in the respective directories, specified relatively to the ini-file's
directory.

[pytest-watch](https://github.com/joeyespo/pytest-watch) is an alternative implementation of this functionality.

### Skip a test module

If you want to run all test modules, except a few, you can exclude them by giving an explicit list of tests to run.
For example, to run all tests except `test_modeling_*.py`:

```bash
pytest $(ls -1 tests/*py | grep -v test_modeling)
```

### Clearing state

For CI builds and when isolation is important (against speed), the cache should be cleared:

```bash
pytest --cache-clear tests
```

### Running tests in parallel

As mentioned earlier, `make test` runs tests in parallel via the `pytest-xdist` plugin (`-n X` argument, e.g. `-n 2`
to run 2 parallel jobs).

`pytest-xdist`'s `--dist=` option allows one to control how the tests are grouped. `--dist=loadfile` puts the tests
located in one file onto the same process.

Since the order of executed tests is different and unpredictable, if running the test suite with `pytest-xdist`
produces failures (meaning we have some undetected coupled tests), use
[pytest-replay](https://github.com/ESSS/pytest-replay) to replay the tests in the same order, which should help to
then reduce that failing sequence to a minimum.

### Test order and repetition

It's good to repeat the tests several times, in sequence, randomly, or in sets, to detect any potential
inter-dependency and state-related bugs (tear down). And the straightforward multiple repetition is just good to
detect some problems that get uncovered by the randomness of DL.

#### Repeat tests

- [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder):

```bash
pip install pytest-flakefinder
```

And then run every test multiple times (50 by default):

```bash
pytest --flake-finder --flake-runs=5 tests/test_failing_test.py
```

<Tip>

This plugin doesn't work with the `-n` flag of `pytest-xdist`.

</Tip>

<Tip>

There is another plugin, `pytest-repeat`, but it doesn't work with `unittest`.

</Tip>

#### Run tests in a random order

```bash
pip install pytest-random-order
```

Important: the presence of `pytest-random-order` will automatically randomize tests; no configuration change or
command line options are required.

As explained earlier, this allows detection of coupled tests - where one test's state affects the state of another.
When `pytest-random-order` is installed it will print the random seed it used for that session, e.g.:

```bash
pytest tests
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```

So that if the given particular sequence fails, you can reproduce it by adding that exact seed, e.g.:

```bash
pytest --random-order-seed=573663
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```

It will only reproduce the exact order if you use the exact same list of tests (or no list at all). Once you start
manually narrowing down the list you can no longer rely on the seed, but have to list the tests manually in the exact
order they failed and tell pytest not to randomize them instead, using `--random-order-bucket=none`, e.g.:

```bash
pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py
```

To disable the shuffling for all tests:

```bash
pytest --random-order-bucket=none
```

By default `--random-order-bucket=module` is implied, which will shuffle the files at the module level.
Es kann auch auf den Ebenen `class`, `package`, `global` und `none` gemischt werden. Die vollständigen Details entnehmen Sie bitte der [Dokumentation](https://github.com/jbasko/pytest-random-order).

Eine weitere Alternative zur Randomisierung ist [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly). Dieses Modul hat eine sehr ähnliche Funktionalität/Schnittstelle, verfügt aber nicht über die Bucket-Modi, die in `pytest-random-order` zur Verfügung stehen. Es hat das gleiche Problem, dass es sich nach der Installation automatisch aufdrängt.

### Variationen von Aussehen und Bedienung

#### pytest-sugar

[pytest-sugar](https://github.com/Frozenball/pytest-sugar) ist ein Plugin, das das Erscheinungsbild verbessert, einen Fortschrittsbalken hinzufügt und fehlschlagende Tests sowie die zugehörigen Assertions sofort anzeigt. Es wird bei der Installation automatisch aktiviert.

```bash
pip install pytest-sugar
```

Um Tests ohne dieses Plugin auszuführen, führen Sie aus:

```bash
pytest -p no:sugar
```

oder deinstallieren Sie es.

#### Melden Sie den Namen jedes Subtests und seinen Fortschritt

Für einen einzelnen oder eine Gruppe von Tests über `pytest` (nach `pip install pytest-pspec`):

```bash
pytest --pspec tests/test_optimization.py
```

#### Zeigt fehlgeschlagene Tests sofort an

[pytest-instafail](https://github.com/pytest-dev/pytest-instafail) zeigt Fehlschläge und Fehler sofort an, anstatt bis zum Ende der Testsitzung zu warten.

```bash
pip install pytest-instafail
```

```bash
pytest --instafail
```

### Zu GPU oder nicht zu GPU

Bei einem GPU-aktivierten Setup fügen Sie zum Testen im reinen CPU-Modus `CUDA_VISIBLE_DEVICES=""` hinzu:

```bash
CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py
```

oder wenn Sie mehrere Grafikprozessoren haben, können Sie angeben, welcher von `pytest` verwendet werden soll. Wenn Sie zum Beispiel nur die zweite Grafikkarte verwenden möchten und die Grafikkarten `0` und `1` haben, können Sie Folgendes ausführen:

```bash
CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py
```

Dies ist praktisch, wenn Sie verschiedene Aufgaben auf verschiedenen GPUs ausführen möchten.

Einige Tests müssen nur auf der CPU ausgeführt werden, andere entweder auf der CPU, der GPU oder der TPU und wieder andere auf mehreren GPUs. Die folgenden Skip-Dekoratoren werden verwendet, um die Anforderungen von Tests in Bezug auf CPU/GPU/TPU festzulegen:

- `require_torch` - dieser Test wird nur unter Torch ausgeführt
- `require_torch_gpu` - wie `require_torch` plus erfordert mindestens 1 GPU
- `require_torch_multi_gpu` - wie `require_torch` plus erfordert mindestens 2 GPUs
- `require_torch_non_multi_gpu` - wie `require_torch` plus erfordert 0 oder 1 GPUs
- `require_torch_up_to_2_gpus` - wie `require_torch` plus erfordert 0, 1 oder 2 GPUs
- `require_torch_xla` - wie `require_torch` plus erfordert mindestens 1 TPU

Lassen Sie uns die GPU-Anforderungen in der folgenden Tabelle darstellen:

| n gpus | decorator                      |
|--------|--------------------------------|
| `>= 0` | `@require_torch`               |
| `>= 1` | `@require_torch_gpu`           |
| `>= 2` | `@require_torch_multi_gpu`     |
| `< 2`  | `@require_torch_non_multi_gpu` |
| `< 3`  | `@require_torch_up_to_2_gpus`  |

Hier ist zum Beispiel ein Test, der nur ausgeführt werden muss, wenn 2 oder mehr GPUs verfügbar sind und pytorch installiert ist:

```python no-style
@require_torch_multi_gpu
def test_example_with_multi_gpu():
```

Wenn ein Test `tensorflow` benötigt, verwenden Sie den Dekorator `require_tf`. 
Zum Beispiel:

```python no-style
@require_tf
def test_tf_thing_with_tensorflow():
```

Diese Dekoratoren können gestapelt werden. Wenn zum Beispiel ein Test langsam ist und mindestens eine GPU unter pytorch benötigt, richten Sie ihn so ein:

```python no-style
@require_torch_gpu
@slow
def test_example_slow_on_gpu():
```

Einige Dekoratoren wie `@parametrized` schreiben Testnamen um, daher müssen `@require_*`-Skip-Dekoratoren als letztes aufgeführt werden, damit sie korrekt funktionieren. Hier ist ein Beispiel für die korrekte Verwendung:

```python no-style
@parameterized.expand(...)
@require_torch_multi_gpu
def test_integration_foo():
```

Dieses Problem mit der Reihenfolge gibt es bei `@pytest.mark.parametrize` nicht: Sie können es an den Anfang oder an den Schluss setzen, und es wird trotzdem funktionieren. Aber es funktioniert nur bei Nicht-Unittests.

Innerhalb von Tests:

- Wie viele GPUs sind verfügbar:

```python
from transformers.testing_utils import get_gpu_count

n_gpu = get_gpu_count()  # works with torch and tf
```

### Testen mit einem bestimmten PyTorch-Backend oder Gerät

Um die Testsuite auf einem bestimmten Torch-Gerät auszuführen, fügen Sie `TRANSFORMERS_TEST_DEVICE="$device"` hinzu, wobei `$device` das Ziel-Backend ist. Zum Beispiel, um nur auf der CPU zu testen:

```bash
TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py
```

Diese Variable ist nützlich, um benutzerdefinierte oder weniger verbreitete PyTorch-Backends wie `mps` zu testen. Sie kann auch verwendet werden, um den gleichen Effekt wie `CUDA_VISIBLE_DEVICES` zu erzielen, indem Sie bestimmte GPUs anvisieren oder im reinen CPU-Modus testen.

Bestimmte Geräte erfordern einen zusätzlichen Import, nachdem Sie `torch` zum ersten Mal importiert haben. Dies kann über die Umgebungsvariable `TRANSFORMERS_TEST_BACKEND` festgelegt werden:

```bash
TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py
```

### Verteiltes Training

`pytest` kann nicht direkt mit verteiltem Training umgehen. Wenn dies versucht wird, tun die Unterprozesse nicht das Richtige und denken am Ende, sie seien `pytest`, und beginnen, die Testsuite in Schleifen auszuführen. Es funktioniert jedoch, wenn man einen normalen Prozess erzeugt, der dann mehrere Worker erzeugt und die IO-Pipes verwaltet.

Hier sind einige Tests, die dies verwenden:

- [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py)
- [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py)

Um direkt mit der Ausführung zu beginnen, suchen Sie in diesen Tests nach dem Aufruf `execute_subprocess_async`.

Sie benötigen mindestens 2 GPUs, um diese Tests in Aktion zu sehen:

```bash
CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py
```

### Erfassung von Ausgaben

Während der Testausführung werden alle Ausgaben, die an `stdout` und `stderr` gesendet werden, aufgezeichnet. Wenn ein Test oder eine Setup-Methode fehlschlägt, wird die entsprechende aufgezeichnete Ausgabe in der Regel zusammen mit dem Fehler-Traceback angezeigt.

Um die Aufzeichnung von Ausgaben zu deaktivieren und `stdout` und `stderr` normal zu erhalten, verwenden Sie `-s` oder `--capture=no`:

```bash
pytest -s tests/utils/test_logging.py
```

So senden Sie Testergebnisse an die JUnit-Formatausgabe:

```bash
py.test tests --junitxml=result.xml
```

### Farbsteuerung
Keine Farbe zu haben (z.B. gelb auf weißem Hintergrund ist nicht lesbar):

```bash
pytest --color=no tests/utils/test_logging.py
```

### Testbericht an den Online-Dienst pastebin senden

Erstellen Sie eine URL für jeden Testfehler:

```bash
pytest --pastebin=failed tests/utils/test_logging.py
```

Dadurch werden Informationen über den Testlauf an einen entfernten Paste-Dienst übermittelt und eine URL für jeden Fehlschlag bereitgestellt. Sie können die Tests wie gewohnt auswählen oder z.B. `-x` hinzufügen, wenn Sie nur einen bestimmten Fehler senden möchten.

Erstellen einer URL für ein ganzes Testsitzungsprotokoll:

```bash
pytest --pastebin=all tests/utils/test_logging.py
```

## Tests schreiben

🤗 Die Tests von Transformers basieren auf `unittest`, werden aber von `pytest` ausgeführt, so dass die meiste Zeit Funktionen aus beiden Systemen verwendet werden können.

Sie können [hier](https://docs.pytest.org/en/stable/unittest.html) nachlesen, welche Funktionen unterstützt werden. Das Wichtigste ist, dass die meisten `pytest`-Fixtures nicht funktionieren. Auch die Parametrisierung nicht, aber wir verwenden das Modul `parameterized`, das auf ähnliche Weise funktioniert.

### Parametrisierung

Oft besteht die Notwendigkeit, denselben Test mehrmals auszuführen, aber mit unterschiedlichen Argumenten. Das könnte innerhalb des Tests geschehen, aber dann gibt es keine Möglichkeit, den Test mit nur einem Satz von Argumenten auszuführen.

```python
# test_this1.py
import unittest
from parameterized import parameterized


class TestMathUnitTest(unittest.TestCase):
    @parameterized.expand(
        [
            ("negative", -1.5, -2.0),
            ("integer", 1, 1.0),
            ("large fraction", 1.6, 1),
        ]
    )
    def test_floor(self, name, input, expected):
        assert_equal(math.floor(input), expected)
```

Nun wird dieser Test standardmäßig 3 Mal ausgeführt, wobei jedes Mal die letzten 3 Argumente von `test_floor` den entsprechenden Argumenten in der Parameterliste zugeordnet werden.

Sie können auch nur die Parameter `negative` und `integer` ausführen, mit:

```bash
pytest -k "negative and integer" tests/test_mytest.py
```

oder alle Untertests außer `negative`, mit:

```bash
pytest -k "not negative" tests/test_mytest.py
```

Neben der Verwendung des gerade erwähnten Filters `-k` können Sie auch den genauen Namen jedes Untertests herausfinden und jeden oder alle unter Verwendung ihrer genauen Namen ausführen.

```bash
pytest test_this1.py --collect-only -q
```

und es wird aufgelistet:

```bash
test_this1.py::TestMathUnitTest::test_floor_0_negative
test_this1.py::TestMathUnitTest::test_floor_1_integer
test_this1.py::TestMathUnitTest::test_floor_2_large_fraction
```

Jetzt können Sie also nur 2 spezifische Untertests durchführen:

```bash
pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer
```

Das Modul [`parameterized`](https://pypi.org/project/parameterized/), das sich bereits in den Entwickler-Abhängigkeiten von `transformers` befindet, funktioniert sowohl für `unittest`- als auch für `pytest`-Tests.

Wenn es sich bei dem Test jedoch nicht um einen `unittest` handelt, können Sie `pytest.mark.parametrize` verwenden (oder Sie können sehen, dass es in einigen bestehenden Tests verwendet wird, meist unter `examples`). 
Hier ist das gleiche Beispiel, diesmal unter Verwendung der `parametrize`-Markierung von `pytest`:

```python
# test_this2.py
import pytest


@pytest.mark.parametrize(
    "name, input, expected",
    [
        ("negative", -1.5, -2.0),
        ("integer", 1, 1.0),
        ("large fraction", 1.6, 1),
    ],
)
def test_floor(name, input, expected):
    assert_equal(math.floor(input), expected)
```

Genau wie bei `parameterized` können Sie mit `pytest.mark.parametrize` genau steuern, welche Subtests ausgeführt werden, wenn der Filter `-k` nicht ausreicht. Allerdings erzeugt diese Parametrisierungsfunktion einen etwas anderen Satz von Namen für die Untertests. Sie sehen folgendermaßen aus:

```bash
pytest test_this2.py --collect-only -q
```

und es wird aufgelistet:

```bash
test_this2.py::test_floor[integer-1-1.0]
test_this2.py::test_floor[negative--1.5--2.0]
test_this2.py::test_floor[large fraction-1.6-1]
```

Jetzt können Sie also nur den spezifischen Test durchführen:

```bash
pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0]
```

wie im vorherigen Beispiel.

### Dateien und Verzeichnisse

In Tests müssen wir oft wissen, wo sich Dinge relativ zur aktuellen Testdatei befinden, und das ist nicht trivial, da der Test von mehreren Verzeichnissen aus aufgerufen werden kann oder sich in Unterverzeichnissen mit unterschiedlicher Tiefe befinden kann. Eine Hilfsklasse `transformers.test_utils.TestCasePlus` löst dieses Problem, indem sie alle grundlegenden Pfade auflöst und einfache Zugriffsmöglichkeiten auf sie bietet:

- `pathlib`-Objekte (alle vollständig aufgelöst):

  - `test_file_path` - der aktuelle Testdateipfad, d.h. `__file__`
  - `test_file_dir` - das Verzeichnis, das die aktuelle Testdatei enthält
  - `tests_dir` - das Verzeichnis der `tests` Test-Suite
  - `examples_dir` - das Verzeichnis der `examples` Test-Suite
  - `repo_root_dir` - das Verzeichnis des Repositorys
  - `src_dir` - das Verzeichnis von `src` (d.h. wo sich das Unterverzeichnis `transformers` befindet)

- stringifizierte Pfade - wie oben, aber diese geben Pfade als Strings zurück, anstatt als `pathlib`-Objekte:

  - `test_file_path_str`
  - `test_file_dir_str`
  - `tests_dir_str`
  - `examples_dir_str`
  - `repo_root_dir_str`
  - `src_dir_str`

Um diese zu verwenden, müssen Sie lediglich sicherstellen, dass sich der Test in einer Unterklasse von `transformers.test_utils.TestCasePlus` befindet. Zum Beispiel:

```python
from transformers.testing_utils import TestCasePlus


class PathExampleTest(TestCasePlus):
    def test_something_involving_local_locations(self):
        data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"
```

Wenn Sie Pfade nicht über `pathlib` manipulieren müssen oder nur einen Pfad als String benötigen, können Sie jederzeit `str()` auf das `pathlib`-Objekt anwenden oder die Accessoren mit der Endung `_str` verwenden. Zum Beispiel:

```python
from transformers.testing_utils import TestCasePlus


class PathExampleTest(TestCasePlus):
    def test_something_involving_stringified_locations(self):
        examples_dir = self.examples_dir_str
```

### Temporäre Dateien und Verzeichnisse

Die Verwendung eindeutiger temporärer Dateien und Verzeichnisse ist für die parallele Durchführung von Tests unerlässlich, damit die Tests nicht gegenseitig ihre Daten überschreiben. Außerdem möchten wir, dass die temporären Dateien und Verzeichnisse am Ende jedes Tests, der sie erstellt hat, gelöscht werden. Daher ist die Verwendung von Paketen wie `tempfile`, die diese Anforderungen erfüllen, unerlässlich. 
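Eine minimale Skizze (reine Python-Standardbibliothek, kein `transformers`-spezifischer Code), wie ein Test mit `tempfile` ein eindeutiges, automatisch aufgeräumtes Verzeichnis anlegen kann:

```python
import os
import tempfile
import unittest


class TempDirExample(unittest.TestCase):
    def test_write_into_unique_tmp_dir(self):
        # Jeder Testlauf erhält ein eigenes, zufällig benanntes Verzeichnis,
        # das beim Verlassen des Kontextmanagers automatisch gelöscht wird.
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = os.path.join(tmp_dir, "data.txt")
            with open(path, "w") as f:
                f.write("hello")
            self.assertTrue(os.path.exists(path))
```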
Beim Debuggen von Tests müssen Sie jedoch sehen können, was in der temporären Datei oder dem temporären Verzeichnis gespeichert wird, und Sie müssen den genauen Pfad kennen, der sich nicht bei jedem neuen Testdurchlauf zufällig ändern darf.

Für solche Zwecke ist die Hilfsklasse `transformers.test_utils.TestCasePlus` am besten geeignet. Sie ist eine Unterklasse von `unittest.TestCase`, so dass wir in den Testmodulen einfach von ihr erben können.

Hier ist ein Beispiel für die Verwendung dieser Klasse:

```python
from transformers.testing_utils import TestCasePlus


class ExamplesTests(TestCasePlus):
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
```

Dieser Code erstellt ein eindeutiges temporäres Verzeichnis und setzt `tmp_dir` auf dessen Speicherort.

- Erstellen Sie ein eindeutiges temporäres Verzeichnis:

```python
def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
```

`tmp_dir` enthält den Pfad zu dem erstellten temporären Verzeichnis. Es wird am Ende des Tests automatisch entfernt.

- Erstellen Sie ein temporäres Verzeichnis meiner Wahl, stellen Sie sicher, dass es leer ist, bevor der Test beginnt, und leeren Sie es nach dem Test nicht:

```python
def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```

Dies ist nützlich für die Fehlersuche, wenn Sie ein bestimmtes Verzeichnis überwachen und sicherstellen möchten, dass die vorherigen Tests keine Daten dort hinterlassen haben.

- Sie können das Standardverhalten außer Kraft setzen, indem Sie die Argumente `before` und `after` direkt überschreiben, was zu einer der folgenden Verhaltensweisen führt:

  - `before=True`: das temporäre Verzeichnis wird immer zu Beginn des Tests gelöscht.
  - `before=False`: wenn das temporäre Verzeichnis bereits existiert, bleiben alle vorhandenen Dateien dort erhalten.
  - `after=True`: das temporäre Verzeichnis wird immer am Ende des Tests gelöscht.
  - `after=False`: das temporäre Verzeichnis wird am Ende des Tests immer beibehalten.

<Tip>

Um das Äquivalent von `rm -r` sicher ausführen zu können, sind nur Unterverzeichnisse des Projektarchiv-Checkouts erlaubt, wenn ein explizites `tmp_dir` verwendet wird, so dass nicht versehentlich ein `/tmp` oder ein ähnlich wichtiger Teil des Dateisystems vernichtet wird. D.h. geben Sie bitte immer Pfade an, die mit `./` beginnen.

</Tip>

<Tip>

Jeder Test kann mehrere temporäre Verzeichnisse registrieren, die alle automatisch entfernt werden, sofern nicht anders gewünscht.

</Tip>

### Temporäre Überschreibung von sys.path

Wenn Sie `sys.path` vorübergehend überschreiben müssen, um z.B. von einem anderen Test zu importieren, können Sie den Kontextmanager `ExtendSysPath` verwenden. Beispiel:

```python
import os
from transformers.testing_utils import ExtendSysPath

bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/.."):
    from test_trainer import TrainerIntegrationCommon  # noqa
```

### Überspringen von Tests

Dies ist nützlich, wenn ein Fehler gefunden und ein neuer Test geschrieben wird, der Fehler aber noch nicht behoben ist. Damit wir ihn in das Haupt-Repository übertragen können, müssen wir sicherstellen, dass er bei `make test` übersprungen wird.

Methoden:

- Ein **Skip** bedeutet, dass Sie erwarten, dass Ihr Test nur dann erfolgreich ist, wenn einige Bedingungen erfüllt sind, andernfalls sollte pytest die Ausführung des Tests ganz überspringen. 
Übliche Beispiele sind das Überspringen von Tests, die nur unter Windows laufen, auf Nicht-Windows-Plattformen oder das Überspringen von Tests, die von einer externen Ressource abhängen, die im Moment nicht verfügbar ist (z.B. eine Datenbank).

- Ein **xfail** bedeutet, dass Sie erwarten, dass ein Test aus irgendeinem Grund fehlschlägt. Ein gängiges Beispiel ist ein Test für eine Funktion, die noch nicht implementiert ist, oder ein noch nicht behobener Fehler. Wenn ein Test trotz eines erwarteten Fehlschlags bestanden wird (markiert mit `pytest.mark.xfail`), ist dies ein xpass und wird in der Testzusammenfassung gemeldet.

Einer der wichtigsten Unterschiede zwischen den beiden ist, dass `skip` den Test nicht ausführt, während `xfail` dies tut. Wenn also der fehlerhafte Code einen schlechten Zustand verursacht, der sich auf andere Tests auswirkt, sollten Sie nicht `xfail` verwenden.

#### Implementierung

- Hier sehen Sie, wie Sie einen ganzen Test bedingungslos überspringen können:

```python no-style
@unittest.skip("this bug needs to be fixed")
def test_feature_x():
```

oder mit pytest:

```python no-style
@pytest.mark.skip(reason="this bug needs to be fixed")
```

oder mit dem `xfail` Weg:

```python no-style
@pytest.mark.xfail
def test_feature_x():
```

- Hier erfahren Sie, wie Sie einen Test aufgrund einer internen Prüfung innerhalb des Tests auslassen können:

```python
def test_feature_x():
    if not has_something():
        pytest.skip("unsupported configuration")
```

oder das ganze Modul:

```python
import pytest

if not pytest.config.getoption("--custom-flag"):
    pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)
```

oder mit dem `xfail` Weg:

```python
def test_feature_x():
    pytest.xfail("expected to fail until bug XYZ is fixed")
```

- Hier erfahren Sie, wie Sie alle Tests in einem Modul überspringen können, wenn ein Import fehlt:

```python
docutils = pytest.importorskip("docutils", minversion="0.3")
```

- Einen Test aufgrund einer Bedingung überspringen:

```python no-style
@pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher")
def test_feature_x():
```

oder:

```python no-style
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_feature_x():
```

oder überspringen Sie das ganze Modul:

```python no-style
@pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows")
class TestClass():
    def test_feature_x(self):
```

Weitere Details, Beispiele und Möglichkeiten finden Sie [hier](https://docs.pytest.org/en/latest/skipping.html).

### Langsame Tests

Die Bibliothek der Tests wächst ständig, und einige der Tests brauchen Minuten, um ausgeführt zu werden. Daher können wir es uns nicht leisten, eine Stunde auf die Fertigstellung der Testsuite auf der CI zu warten. Daher sollten langsame Tests, mit einigen Ausnahmen für wichtige Tests, wie im folgenden Beispiel markiert werden:

```python no-style
from transformers.testing_utils import slow


@slow
def test_integration_foo():
```

Sobald ein Test als `@slow` markiert ist, setzen Sie die Umgebungsvariable `RUN_SLOW=1`, um solche Tests auszuführen, z.B:

```bash
RUN_SLOW=1 pytest tests
```

Einige Dekoratoren wie `@parameterized` schreiben Testnamen um, daher müssen `@slow` und die übrigen Skip-Dekoratoren `@require_*` als letztes aufgeführt werden, damit sie korrekt funktionieren. Hier ist ein Beispiel für die korrekte Verwendung:

```python no-style
@parameterized.expand(...)
@slow
def test_integration_foo():
```

Wie zu Beginn dieses Dokuments erläutert, werden langsame Tests nach einem Zeitplan ausgeführt und nicht in den CI-Prüfungen von PRs. Es ist also möglich, dass einige Probleme bei der Einreichung eines PRs übersehen und zusammengeführt werden. Solche Probleme werden beim nächsten geplanten CI-Job abgefangen. Das bedeutet aber auch, dass es wichtig ist, die langsamen Tests auf Ihrem Rechner auszuführen, bevor Sie den PR einreichen.

Hier ist ein grober Entscheidungsmechanismus für die Auswahl der Tests, die als langsam markiert werden sollen:

Wenn der Test auf eine der internen Komponenten der Bibliothek ausgerichtet ist (z.B. Modellierungsdateien, Tokenisierungsdateien, Pipelines), dann sollten wir diesen Test in der nicht langsamen Testsuite ausführen. Wenn er sich auf einen anderen Aspekt der Bibliothek bezieht, wie z.B. die Dokumentation oder die Beispiele, dann sollten wir diese Tests in der langsamen Testsuite durchführen. Und um diesen Ansatz dann zu verfeinern, sollten wir Ausnahmen einführen:

- Alle Tests, die einen umfangreichen Satz von Gewichten oder einen Datensatz mit einer Größe von mehr als ~50MB herunterladen müssen (z.B. Modell- oder Tokenizer-Integrationstests, Pipeline-Integrationstests), sollten auf langsam gesetzt werden. Wenn Sie ein neues Modell hinzufügen, sollten Sie eine kleine Version des Modells (mit zufälligen Gewichtungen) für Integrationstests erstellen und in den Hub hochladen. Dies wird in den folgenden Abschnitten erläutert.
- Alle Tests, die ein Training durchführen müssen, das nicht speziell auf Schnelligkeit optimiert ist, sollten auf langsam gesetzt werden.
- Wir können Ausnahmen einführen, wenn einige dieser Tests, die nicht langsam sein sollten, unerträglich langsam sind, und sie auf `@slow` setzen. Auto-Modellierungstests, die große Dateien auf der Festplatte speichern und laden, sind ein gutes Beispiel für Tests, die als `@slow` markiert sind.
- Wenn ein Test in weniger als 1 Sekunde auf der CI abgeschlossen wird (einschließlich eventueller Downloads), sollte es sich trotzdem um einen normalen Test handeln.

Insgesamt müssen alle nicht langsamen Tests die verschiedenen Interna abdecken und dabei schnell bleiben. Zum Beispiel kann eine signifikante Abdeckung erreicht werden, indem Sie mit speziell erstellten kleinen Modellen mit zufälligen Gewichten testen. Solche Modelle haben eine sehr geringe Anzahl von Schichten (z.B. 2), Vokabeln (z.B. 1000), usw. Dann können die `@slow`-Tests große, langsame Modelle verwenden, um qualitative Tests durchzuführen. Um die Verwendung dieser Modelle zu sehen, suchen Sie einfach nach *tiny*-Modellen mit:

```bash
grep tiny tests examples
```

Hier ist ein Beispiel für ein [Skript](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py), das das winzige Modell [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de) erstellt hat. Sie können es ganz einfach an die Architektur Ihres eigenen Modells anpassen.

Es ist leicht, die Laufzeit falsch zu messen, wenn zum Beispiel ein großes Modell heruntergeladen wird: Wenn Sie lokal testen, werden die heruntergeladenen Dateien zwischengespeichert und somit die Download-Zeit nicht gemessen. Prüfen Sie daher den Ausführungsgeschwindigkeitsbericht in den CI-Protokollen (die Ausgabe von `pytest --durations=0 tests`). 
Dieser Bericht ist auch nützlich, um langsame Ausreißer zu finden, die nicht als solche gekennzeichnet sind oder die neu geschrieben werden müssen, um schnell zu sein. Wenn Sie bemerken, dass die Testsuite auf der CI langsam wird, zeigt die oberste Liste dieses Berichts die langsamsten Tests.

### Testen der stdout/stderr-Ausgabe

Um Funktionen zu testen, die in `stdout` und/oder `stderr` schreiben, kann der Test auf diese Ströme zugreifen, indem er das [capsys-System](https://docs.pytest.org/en/latest/capture.html) von `pytest` verwendet. So wird dies bewerkstelligt:

```python
import sys


def print_to_stdout(s):
    print(s)


def print_to_stderr(s):
    sys.stderr.write(s)


def test_result_and_stdout(capsys):
    msg = "Hello"
    print_to_stdout(msg)
    print_to_stderr(msg)
    out, err = capsys.readouterr()  # consume the captured output streams
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    sys.stderr.write(err)
    # test:
    assert msg in out
    assert msg in err
```

Und natürlich wird `stderr` in den meisten Fällen als Teil einer Ausnahme auftreten, so dass try/except in einem solchen Fall verwendet werden muss:

```python
def raise_exception(msg):
    raise ValueError(msg)


def test_something_exception():
    msg = "Not a good value"
    error = ""
    try:
        raise_exception(msg)
    except Exception as e:
        error = str(e)
        assert msg in error, f"{msg} is in the exception:\n{error}"
```

Ein anderer Ansatz zur Erfassung von stdout ist `contextlib.redirect_stdout`:

```python
from io import StringIO
from contextlib import redirect_stdout


def print_to_stdout(s):
    print(s)


def test_result_and_stdout():
    msg = "Hello"
    buffer = StringIO()
    with redirect_stdout(buffer):
        print_to_stdout(msg)
    out = buffer.getvalue()
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    # test:
    assert msg in out
```

Ein wichtiges potenzielles Problem beim Erfassen von stdout ist, dass die Ausgabe `\r`-Zeichen enthalten kann, die bei normalem `print` alles zurücksetzen, was bisher gedruckt wurde. Mit `pytest` gibt es kein Problem, aber mit `pytest -s` werden diese Zeichen in den Puffer aufgenommen. Um den Test mit und ohne `-s` laufen zu lassen, müssen Sie also eine zusätzliche Bereinigung der erfassten Ausgabe vornehmen, indem Sie `re.sub(r'~.*\r', '', buf, 0, re.M)` verwenden.

Aber dann haben wir einen Hilfskontextmanager-Wrapper, der sich automatisch um alles kümmert, unabhängig davon, ob die Ausgabe `\r`-Zeichen enthält oder nicht:

```python
from transformers.testing_utils import CaptureStdout

with CaptureStdout() as cs:
    function_that_writes_to_stdout()
print(cs.out)
```

Hier ist ein vollständiges Testbeispiel:

```python
from transformers.testing_utils import CaptureStdout

msg = "Secret message\r"
final = "Hello World"
with CaptureStdout() as cs:
    print(msg + final)
assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}"
```

Wenn Sie `stderr` aufzeichnen möchten, verwenden Sie stattdessen die Klasse `CaptureStderr`:

```python
from transformers.testing_utils import CaptureStderr

with CaptureStderr() as cs:
    function_that_writes_to_stderr()
print(cs.err)
```

Wenn Sie beide Streams auf einmal erfassen müssen, verwenden Sie die übergeordnete Klasse `CaptureStd`:

```python
from transformers.testing_utils import CaptureStd

with CaptureStd() as cs:
    function_that_writes_to_stdout_and_stderr()
print(cs.err, cs.out)
```

Um das Debuggen von Testproblemen zu erleichtern, geben diese Kontextmanager standardmäßig die aufgezeichneten Streams beim Verlassen des Kontexts wieder aus. 
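Eine kleine Skizze, die dieses Wiedergabeverhalten verdeutlicht (die erfasste Ausgabe erscheint beim Verlassen des Kontextmanagers erneut auf dem Terminal und steht zusätzlich programmatisch zur Verfügung):

```python
from transformers.testing_utils import CaptureStd

with CaptureStd() as cs:
    print("hello from the test")
# Beim Verlassen des `with`-Blocks wird die erfasste Ausgabe standardmäßig
# erneut ausgegeben; gleichzeitig kann sie im Test geprüft werden:
assert "hello from the test" in cs.out
```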
### Erfassen von Logger-Streams

Wenn Sie die Ausgabe eines Loggers validieren müssen, können Sie `CaptureLogger` verwenden:

```python
from transformers import logging
from transformers.testing_utils import CaptureLogger

msg = "Testing 1, 2, 3"
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
with CaptureLogger(logger) as cl:
    logger.info(msg)
assert cl.out, msg + "\n"
```

### Testen mit Umgebungsvariablen

Wenn Sie die Auswirkungen von Umgebungsvariablen für einen bestimmten Test testen möchten, können Sie den Hilfsdekorator `transformers.testing_utils.mockenv` verwenden:

```python
from transformers.testing_utils import mockenv


class HfArgumentParserTest(unittest.TestCase):
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
```

Manchmal muss ein externes Programm aufgerufen werden, was die Einstellung von `PYTHONPATH` in `os.environ` erfordert, um mehrere lokale Pfade einzuschließen. Eine Hilfsklasse `transformers.test_utils.TestCasePlus` hilft Ihnen dabei:

```python
from transformers.testing_utils import TestCasePlus


class EnvExampleTest(TestCasePlus):
    def test_external_prog(self):
        env = self.get_env()
        # now call the external program, passing `env` to it
```

Je nachdem, ob sich die Testdatei in der Testsuite `tests` oder in `examples` befindet, setzt diese Methode `env[PYTHONPATH]` korrekt auf eines dieser beiden Verzeichnisse sowie auf das `src`-Verzeichnis, um sicherzustellen, dass der Test mit dem aktuellen Projektarchiv durchgeführt wird, und schließlich auf das, was in `env[PYTHONPATH]` gegebenenfalls bereits gesetzt war, bevor der Test aufgerufen wurde.

Diese Hilfsmethode erstellt eine Kopie des Objekts `os.environ`, so dass das Original intakt bleibt.

### Reproduzierbare Ergebnisse erhalten

In manchen Situationen möchten Sie vielleicht die Zufälligkeit Ihrer Tests beseitigen. Um identische, reproduzierbare Ergebnisse zu erhalten, müssen Sie den Seed festlegen:

```python
seed = 42

# python RNG
import random

random.seed(seed)

# pytorch RNGs
import torch

torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

# numpy RNG
import numpy as np

np.random.seed(seed)

# tf RNG
tf.random.set_seed(seed)
```

### Tests debuggen

Um einen Debugger an der Stelle zu starten, an der die Warnung auftritt, gehen Sie wie folgt vor:

```bash
pytest tests/utils/test_logging.py -W error::UserWarning --pdb
```

## Arbeiten mit GitHub-Actions-Workflows

Um einen CI-Job für einen Self-Push-Workflow auszulösen, müssen Sie:

1. Einen neuen Zweig im `transformers`-Ursprungsrepository erstellen (kein Fork!).
2. Der Name des Zweigs muss entweder mit `ci_` oder `ci-` beginnen (`main` löst ihn auch aus, aber wir können keine PRs auf `main` erstellen). Er wird außerdem nur für bestimmte Pfade ausgelöst - Sie finden die aktuelle Definition, falls sie sich seit der Erstellung dieses Dokuments geändert hat, [hier](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) unter *push:*.
3. Erstellen Sie einen PR von diesem Zweig.
4. Dann können Sie sehen, wie der Job [hier](https://github.com/huggingface/transformers/actions/workflows/self-push.yml) erscheint. Er wird möglicherweise nicht sofort ausgeführt, wenn ein Backlog vorhanden ist. 
## Testen experimenteller CI-Funktionen

Das Testen von CI-Funktionen kann potenziell problematisch sein, da es die normale CI-Funktion beeinträchtigen kann. Wenn also eine neue CI-Funktion hinzugefügt werden soll, sollte dies wie folgt geschehen.

1. Erstellen Sie einen neuen Auftrag, der die zu testende Funktion testet.
2. Der neue Job muss immer erfolgreich sein, so dass er uns ein grünes ✓ gibt (Details unten).
3. Lassen Sie ihn einige Tage lang laufen, um zu sehen, dass eine Vielzahl verschiedener PR-Typen darauf läuft (Benutzer-Fork-Zweige, nicht geforkte Zweige, Zweige, die aus direkten Dateibearbeitungen in der github.com-UI stammen, verschiedene erzwungene Pushes, etc. - es gibt sehr viele), während Sie die Protokolle des experimentellen Jobs überwachen (nicht den Status des gesamten Jobs, da dieser absichtlich immer grün ist).
4. Wenn klar ist, dass alles in Ordnung ist, fügen Sie die neuen Änderungen in die bestehenden Jobs ein.

Auf diese Weise wird der normale Arbeitsablauf nicht durch Experimente mit der CI-Funktionalität selbst beeinträchtigt.

Wie können wir nun dafür sorgen, dass der Auftrag immer erfolgreich ist, während die neue CI-Funktion entwickelt wird?

Einige CIs, wie TravisCI, unterstützen ignore-step-failure und melden den gesamten Job als erfolgreich, aber CircleCI und Github Actions unterstützen dies zum jetzigen Zeitpunkt nicht.

Sie können also die folgende Abhilfe verwenden:

1. Setzen Sie `set +euo pipefail` am Anfang des Ausführungsbefehls, um die meisten potenziellen Fehler im Bash-Skript zu unterdrücken.
2. Der letzte Befehl muss ein Erfolg sein: `echo "done"` oder einfach `true` reicht aus.

Hier ist ein Beispiel:

```yaml
- run:
    name: run CI experiment
    command: |
        set +euo pipefail
        echo "setting run-all-despite-any-errors-mode"
        this_command_will_fail
        echo "but bash continues to run"
        # emulate another failure
        false
        # but the last command must be a success
        echo "during experiment do not remove: reporting success to CI, even if there were failures"
```

Für einfache Befehle können Sie auch Folgendes tun:

```bash
cmd_that_may_fail || true
```

Wenn Sie mit den Ergebnissen zufrieden sind, integrieren Sie den experimentellen Schritt oder Job natürlich in den Rest der normalen Jobs. Entfernen Sie dabei `set +euo pipefail` oder andere Dinge, die Sie eventuell hinzugefügt haben, um sicherzustellen, dass der experimentelle Auftrag den normalen CI-Betrieb nicht beeinträchtigt.

Dieser ganze Prozess wäre viel einfacher gewesen, wenn wir nur etwas wie `allow-failure` für den experimentellen Schritt festlegen könnten und ihn scheitern lassen könnten, ohne den Gesamtstatus der PRs zu beeinträchtigen. Aber wie bereits erwähnt, unterstützen CircleCI und Github Actions dies im Moment nicht.

Sie können in diesen CI-spezifischen Threads für diese Funktion stimmen und sehen, wo sie steht:

- [Github Actions:](https://github.com/actions/toolkit/issues/399)
- [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344)
transformers/docs/source/de/testing.md/0
{ "file_path": "transformers/docs/source/de/testing.md", "repo_id": "transformers", "token_count": 19303 }
245
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# AltCLIP

## Overview

The AltCLIP model was proposed in [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679v2) by Zhongzhi Chen, Guang Liu, Bo-Wen Zhang, Fulong Ye, Qinghong Yang, Ledell Wu. AltCLIP (Altering the Language Encoder in CLIP) is a neural network trained on a variety of image-text and text-text pairs. By switching CLIP's text encoder with a pretrained multilingual text encoder XLM-R, we could obtain very close performances with CLIP on almost all tasks, and extended original CLIP's capabilities such as multilingual understanding.

The abstract from the paper is the following:

*In this work, we present a conceptually simple and effective method to train a strong bilingual multimodal representation model. Starting from the pretrained multimodal representation model CLIP released by OpenAI, we switched its text encoder with a pretrained multilingual text encoder XLM-R, and aligned both languages and image representations by a two-stage training schema consisting of teacher learning and contrastive learning. We validate our method through evaluations of a wide range of tasks. We set new state-of-the-art performances on a bunch of tasks including ImageNet-CN, Flicker30k- CN, and COCO-CN. Further, we obtain very close performances with CLIP on almost all tasks, suggesting that one can simply alter the text encoder in CLIP for extended capabilities such as multilingual understanding.*

This model was contributed by [jongjyh](https://huggingface.co/jongjyh).

## Usage tips and example

The usage of AltCLIP is very similar to that of CLIP; the difference from CLIP is the text encoder. Note that we use bidirectional attention instead of causal attention and we take the [CLS] token in XLM-R to represent the text embedding.

AltCLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image classification. AltCLIP uses a ViT-like Transformer to get visual features and a bidirectional language model to get the text features. Both the text and visual features are then projected to a latent space with identical dimension. The dot product between the projected image and text features is then used as a similarity score.

To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.

The [`CLIPImageProcessor`] can be used to resize (or rescale) and normalize images for the model. 
The [`AltCLIPProcessor`] wraps a [`CLIPImageProcessor`] and a [`XLMRobertaTokenizer`] into a single instance to both encode the text and prepare the images. The following example shows how to get the image-text similarity scores using [`AltCLIPProcessor`] and [`AltCLIPModel`]. ```python >>> from PIL import Image >>> import requests >>> from transformers import AltCLIPModel, AltCLIPProcessor >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` <Tip> This model is based on `CLIPModel`, use it like you would use the original [CLIP](clip). </Tip> ## AltCLIPConfig [[autodoc]] AltCLIPConfig - from_text_vision_configs ## AltCLIPTextConfig [[autodoc]] AltCLIPTextConfig ## AltCLIPVisionConfig [[autodoc]] AltCLIPVisionConfig ## AltCLIPProcessor [[autodoc]] AltCLIPProcessor ## AltCLIPModel [[autodoc]] AltCLIPModel - forward - get_text_features - get_image_features ## AltCLIPTextModel [[autodoc]] AltCLIPTextModel - forward ## AltCLIPVisionModel [[autodoc]] AltCLIPVisionModel - forward
transformers/docs/source/en/model_doc/altclip.md/0
{ "file_path": "transformers/docs/source/en/model_doc/altclip.md", "repo_id": "transformers", "token_count": 1400 }
246
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Big Transfer (BiT) ## Overview The BiT model was proposed in [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) by Alexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby. BiT is a simple recipe for scaling up pre-training of [ResNet](resnet)-like architectures (specifically, ResNetv2). The method results in significant improvements for transfer learning. The abstract from the paper is the following: *Transfer of pre-trained representations improves sample efficiency and simplifies hyperparameter tuning when training deep neural networks for vision. We revisit the paradigm of pre-training on large supervised datasets and fine-tuning the model on a target task. We scale up pre-training, and propose a simple recipe that we call Big Transfer (BiT). By combining a few carefully selected components, and transferring using a simple heuristic, we achieve strong performance on over 20 datasets. BiT performs well across a surprisingly wide range of data regimes -- from 1 example per class to 1M total examples. BiT achieves 87.5% top-1 accuracy on ILSVRC-2012, 99.4% on CIFAR-10, and 76.3% on the 19 task Visual Task Adaptation Benchmark (VTAB). On small datasets, BiT attains 76.8% on ILSVRC-2012 with 10 examples per class, and 97.0% on CIFAR-10 with 10 examples per class. We conduct detailed analysis of the main components that lead to high transfer performance.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/google-research/big_transfer). ## Usage tips - BiT models are equivalent to ResNetv2 in terms of architecture, except that: 1) all batch normalization layers are replaced by [group normalization](https://arxiv.org/abs/1803.08494), 2) [weight standardization](https://arxiv.org/abs/1903.10520) is used for convolutional layers. The authors show that the combination of both is useful for training with large batch sizes, and has a significant impact on transfer learning. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BiT. <PipelineTag pipeline="image-classification"/> - [`BitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! 
The resource should ideally demonstrate something new instead of duplicating an existing resource. ## BitConfig [[autodoc]] BitConfig ## BitImageProcessor [[autodoc]] BitImageProcessor - preprocess ## BitModel [[autodoc]] BitModel - forward ## BitForImageClassification [[autodoc]] BitForImageClassification - forward
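
For quick experimentation, the following is a minimal inference sketch (not part of the original release notes) that assumes the publicly available `google/bit-50` checkpoint on the Hub; any other BiT classification checkpoint can be substituted:

```python
import torch
import requests
from PIL import Image

from transformers import AutoImageProcessor, BitForImageClassification

# Load an example image from the COCO dataset
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Assumed checkpoint: google/bit-50 (ImageNet-1k fine-tuned)
processor = AutoImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Print the predicted ImageNet class
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```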
transformers/docs/source/en/model_doc/bit.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bit.md", "repo_id": "transformers", "token_count": 1005 }
247
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLVP ## Overview The CLVP (Contrastive Language-Voice Pretrained Transformer) model was proposed in [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. The abstract from the paper is the following: *In recent years, the field of image generation has been revolutionized by the application of autoregressive transformers and DDPMs. These approaches model the process of image generation as a step-wise probabilistic processes and leverage large amounts of compute and data to learn the image distribution. This methodology of improving performance need not be confined to images. This paper describes a way to apply advances in the image generative domain to speech synthesis. The result is TorToise - an expressive, multi-voice text-to-speech system.* This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). The original code can be found [here](https://github.com/neonbjb/tortoise-tts). ## Usage tips 1. CLVP is an integral part of the Tortoise TTS model. 2. CLVP can be used to compare different generated speech candidates with the provided text, and the best speech tokens are forwarded to the diffusion model. 3. The use of the [`ClvpModelForConditionalGeneration.generate()`] method is strongly recommended for tortoise usage. 4. Note that the CLVP model expects the audio to be sampled at 22.05 kHz contrary to other audio models which expects 16 kHz. ## Brief Explanation: - The [`ClvpTokenizer`] tokenizes the text input, and the [`ClvpFeatureExtractor`] extracts the log mel-spectrogram from the desired audio. - [`ClvpConditioningEncoder`] takes those text tokens and audio representations and converts them into embeddings conditioned on the text and audio. - The [`ClvpForCausalLM`] uses those embeddings to generate multiple speech candidates. - Each speech candidate is passed through the speech encoder ([`ClvpEncoder`]) which converts them into a vector representation, and the text encoder ([`ClvpEncoder`]) converts the text tokens into the same latent space. - At the end, we compare each speech vector with the text vector to see which speech vector is most similar to the text vector. - [`ClvpModelForConditionalGeneration.generate()`] compresses all of the logic described above into a single method. Example : ```python >>> import datasets >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library). >>> text = "This is an example text." >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) >>> sample = ds[0]["audio"] >>> # Define processor and model. 
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") >>> # Generate processor output and model output. >>> processor_output = processor(raw_speech=sample["array"], sampling_rate=sample["sampling_rate"], text=text, return_tensors="pt") >>> generated_output = model.generate(**processor_output) ``` ## ClvpConfig [[autodoc]] ClvpConfig - from_sub_model_configs ## ClvpEncoderConfig [[autodoc]] ClvpEncoderConfig ## ClvpDecoderConfig [[autodoc]] ClvpDecoderConfig ## ClvpTokenizer [[autodoc]] ClvpTokenizer - save_vocabulary ## ClvpFeatureExtractor [[autodoc]] ClvpFeatureExtractor - __call__ ## ClvpProcessor [[autodoc]] ClvpProcessor - __call__ - decode - batch_decode ## ClvpModelForConditionalGeneration [[autodoc]] ClvpModelForConditionalGeneration - forward - generate - get_text_features - get_speech_features ## ClvpForCausalLM [[autodoc]] ClvpForCausalLM ## ClvpModel [[autodoc]] ClvpModel ## ClvpEncoder [[autodoc]] ClvpEncoder ## ClvpDecoder [[autodoc]] ClvpDecoder
transformers/docs/source/en/model_doc/clvp.md/0
{ "file_path": "transformers/docs/source/en/model_doc/clvp.md", "repo_id": "transformers", "token_count": 1339 }
248
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Deformable DETR ## Overview The Deformable DETR model was proposed in [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. Deformable DETR mitigates the slow convergence issues and limited feature spatial resolution of the original [DETR](detr) by leveraging a new deformable attention module which only attends to a small set of key sampling points around a reference. The abstract from the paper is the following: *DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10 times less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/deformable_detr_architecture.png" alt="drawing" width="600"/> <small> Deformable DETR architecture. Taken from the <a href="https://arxiv.org/abs/2010.04159">original paper</a>.</small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/fundamentalvision/Deformable-DETR). ## Usage tips - Training Deformable DETR is equivalent to training the original [DETR](detr) model. See the [resources](#resources) section below for demo notebooks. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Deformable DETR. <PipelineTag pipeline="object-detection"/> - Demo notebooks regarding inference + fine-tuning on a custom dataset for [`DeformableDetrForObjectDetection`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Deformable-DETR). - See also: [Object detection task guide](../tasks/object_detection). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. 
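As a quick illustration of the inference API, here is a minimal sketch assuming the public `SenseTime/deformable-detr` checkpoint (training itself follows the DETR recipe referenced above):

```python
import torch
import requests
from PIL import Image

from transformers import AutoImageProcessor, DeformableDetrForObjectDetection

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Assumed checkpoint: SenseTime/deformable-detr
processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw outputs (class logits + boxes) into COCO-style detections
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.5)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {round(score.item(), 3)} at {box.tolist()}")
```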
## DeformableDetrImageProcessor [[autodoc]] DeformableDetrImageProcessor - preprocess - post_process_object_detection ## DeformableDetrFeatureExtractor [[autodoc]] DeformableDetrFeatureExtractor - __call__ - post_process_object_detection ## DeformableDetrConfig [[autodoc]] DeformableDetrConfig ## DeformableDetrModel [[autodoc]] DeformableDetrModel - forward ## DeformableDetrForObjectDetection [[autodoc]] DeformableDetrForObjectDetection - forward
transformers/docs/source/en/model_doc/deformable_detr.md/0
{ "file_path": "transformers/docs/source/en/model_doc/deformable_detr.md", "repo_id": "transformers", "token_count": 1014 }
249
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ELECTRA <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=electra"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-electra-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/electra_large_discriminator_squad2_512"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The ELECTRA model was proposed in the paper [ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators](https://openreview.net/pdf?id=r1xMH1BtvB). ELECTRA is a new pretraining approach which trains two transformer models: the generator and the discriminator. The generator's role is to replace tokens in a sequence, and is therefore trained as a masked language model. The discriminator, which is the model we're interested in, tries to identify which tokens were replaced by the generator in the sequence. The abstract from the paper is the following: *Masked language modeling (MLM) pretraining methods such as BERT corrupt the input by replacing some tokens with [MASK] and then train a model to reconstruct the original tokens. While they produce good results when transferred to downstream NLP tasks, they generally require large amounts of compute to be effective. As an alternative, we propose a more sample-efficient pretraining task called replaced token detection. Instead of masking the input, our approach corrupts it by replacing some tokens with plausible alternatives sampled from a small generator network. Then, instead of training a model that predicts the original identities of the corrupted tokens, we train a discriminative model that predicts whether each token in the corrupted input was replaced by a generator sample or not. Thorough experiments demonstrate this new pretraining task is more efficient than MLM because the task is defined over all input tokens rather than just the small subset that was masked out. As a result, the contextual representations learned by our approach substantially outperform the ones learned by BERT given the same model size, data, and compute. The gains are particularly strong for small models; for example, we train a model on one GPU for 4 days that outperforms GPT (trained using 30x more compute) on the GLUE natural language understanding benchmark. Our approach also works well at scale, where it performs comparably to RoBERTa and XLNet while using less than 1/4 of their compute and outperforms them when using the same amount of compute.* This model was contributed by [lysandre](https://huggingface.co/lysandre). The original code can be found [here](https://github.com/google-research/electra). 
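As a minimal illustration of the replaced-token-detection objective, the sketch below (assuming the public `google/electra-small-discriminator` checkpoint) runs the discriminator directly to score which input tokens look replaced; a positive logit means the token is flagged as a replacement:

```python
import torch

from transformers import ElectraForPreTraining, ElectraTokenizerFast

# Assumed checkpoint: google/electra-small-discriminator
tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
discriminator = ElectraForPreTraining.from_pretrained("google/electra-small-discriminator")

sentence = "The quick brown fox fake over the lazy dog"
inputs = tokenizer(sentence, return_tensors="pt")

with torch.no_grad():
    logits = discriminator(**inputs).logits

# Tokens with a positive logit are predicted to have been replaced by the generator
predictions = (logits > 0).int()[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs.input_ids[0])
print(list(zip(tokens, predictions)))
```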
## Usage tips - ELECTRA is the pretraining approach, therefore there is nearly no changes done to the underlying model: BERT. The only change is the separation of the embedding size and the hidden size: the embedding size is generally smaller, while the hidden size is larger. An additional projection layer (linear) is used to project the embeddings from their embedding size to the hidden size. In the case where the embedding size is the same as the hidden size, no projection layer is used. - ELECTRA is a transformer model pretrained with the use of another (small) masked language model. The inputs are corrupted by that language model, which takes an input text that is randomly masked and outputs a text in which ELECTRA has to predict which token is an original and which one has been replaced. Like for GAN training, the small language model is trained for a few steps (but with the original texts as objective, not to fool the ELECTRA model like in a traditional GAN setting) then the ELECTRA model is trained for a few steps. - The ELECTRA checkpoints saved using [Google Research's implementation](https://github.com/google-research/electra) contain both the generator and discriminator. The conversion script requires the user to name which model to export into the correct architecture. Once converted to the HuggingFace format, these checkpoints may be loaded into all available ELECTRA models, however. This means that the discriminator may be loaded in the [`ElectraForMaskedLM`] model, and the generator may be loaded in the [`ElectraForPreTraining`] model (the classification head will be randomly initialized as it doesn't exist in the generator). ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## ElectraConfig [[autodoc]] ElectraConfig ## ElectraTokenizer [[autodoc]] ElectraTokenizer ## ElectraTokenizerFast [[autodoc]] ElectraTokenizerFast ## Electra specific outputs [[autodoc]] models.electra.modeling_electra.ElectraForPreTrainingOutput [[autodoc]] models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput <frameworkcontent> <pt> ## ElectraModel [[autodoc]] ElectraModel - forward ## ElectraForPreTraining [[autodoc]] ElectraForPreTraining - forward ## ElectraForCausalLM [[autodoc]] ElectraForCausalLM - forward ## ElectraForMaskedLM [[autodoc]] ElectraForMaskedLM - forward ## ElectraForSequenceClassification [[autodoc]] ElectraForSequenceClassification - forward ## ElectraForMultipleChoice [[autodoc]] ElectraForMultipleChoice - forward ## ElectraForTokenClassification [[autodoc]] ElectraForTokenClassification - forward ## ElectraForQuestionAnswering [[autodoc]] ElectraForQuestionAnswering - forward </pt> <tf> ## TFElectraModel [[autodoc]] TFElectraModel - call ## TFElectraForPreTraining [[autodoc]] TFElectraForPreTraining - call ## TFElectraForMaskedLM [[autodoc]] TFElectraForMaskedLM - call ## TFElectraForSequenceClassification [[autodoc]] TFElectraForSequenceClassification - call ## TFElectraForMultipleChoice [[autodoc]] TFElectraForMultipleChoice - call ## TFElectraForTokenClassification [[autodoc]] TFElectraForTokenClassification - call ## TFElectraForQuestionAnswering [[autodoc]] TFElectraForQuestionAnswering - call </tf> <jax> 
## FlaxElectraModel [[autodoc]] FlaxElectraModel - __call__ ## FlaxElectraForPreTraining [[autodoc]] FlaxElectraForPreTraining - __call__ ## FlaxElectraForCausalLM [[autodoc]] FlaxElectraForCausalLM - __call__ ## FlaxElectraForMaskedLM [[autodoc]] FlaxElectraForMaskedLM - __call__ ## FlaxElectraForSequenceClassification [[autodoc]] FlaxElectraForSequenceClassification - __call__ ## FlaxElectraForMultipleChoice [[autodoc]] FlaxElectraForMultipleChoice - __call__ ## FlaxElectraForTokenClassification [[autodoc]] FlaxElectraForTokenClassification - __call__ ## FlaxElectraForQuestionAnswering [[autodoc]] FlaxElectraForQuestionAnswering - __call__ </jax> </frameworkcontent>
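To make the checkpoint flexibility described in the usage tips concrete, here is a minimal sketch that loads a converted discriminator checkpoint into [`ElectraForPreTraining`] and scores which tokens look replaced. The `google/electra-small-discriminator` identifier is used purely as an illustration; any converted discriminator checkpoint should work the same way:

```python
import torch
from transformers import ElectraForPreTraining, ElectraTokenizerFast

discriminator = ElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")

# One token ("fake") has been substituted, mimicking a generator corruption.
fake_sentence = "The quick brown fox fake over the lazy dog"
inputs = tokenizer(fake_sentence, return_tensors="pt")

with torch.no_grad():
    logits = discriminator(**inputs).logits

# Positive logits mark the tokens the discriminator flags as replaced.
predicted_replaced = (logits > 0).long()
print(predicted_replaced)
```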
transformers/docs/source/en/model_doc/electra.md/0
{ "file_path": "transformers/docs/source/en/model_doc/electra.md", "repo_id": "transformers", "token_count": 2211 }
250
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Fuyu ## Overview The Fuyu model was created by [ADEPT](https://www.adept.ai/blog/fuyu-8b), and authored by Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, Sağnak Taşırlar. The authors introduced Fuyu-8B, a decoder-only multimodal model based on the classic transformers architecture, with query and key normalization. A linear encoder is added to create multimodal embeddings from image inputs. By treating image tokens like text tokens and using a special image-newline character, the model knows when an image line ends. Image positional embeddings are removed. This avoids the need for different training phases for various image resolutions. With 8 billion parameters and licensed under CC-BY-NC, Fuyu-8B is notable for its ability to handle both text and images, its impressive context size of 16K, and its overall performance. <Tip warning={true}> The `Fuyu` models were trained using `bfloat16`, but the original inference uses `float16` The checkpoints uploaded on the hub use `torch_dtype = 'float16'` which will be used by the `AutoModel` API to cast the checkpoints from `torch.float32` to `torch.float16`. The `dtype` of the online weights is mostly irrelevant, unless you are using `torch_dtype="auto"` when initializing a model using `model = AutoModelForCausalLM.from_pretrained("path", torch_dtype = "auto")`. The reason is that the model will first be downloaded ( using the `dtype` of the checkpoints online) then it will be cast to the default `dtype` of `torch` (becomes `torch.float32`). Users should specify the `torch_dtype` they want, and if they don't it will be `torch.float32`. Finetuning the model in `float16` is not recommended and known to produce `nan`, as such the model should be fine-tuned in `bfloat16`. 
</Tip>

Tips:

- To convert the model, you need to clone the original repository using `git clone https://github.com/persimmon-ai-labs/adept-inference`, then get the checkpoints:

```bash
git clone https://github.com/persimmon-ai-labs/adept-inference
wget path/to/fuyu-8b-model-weights.tar
tar -xvf fuyu-8b-model-weights.tar
python src/transformers/models/fuyu/convert_fuyu_weights_to_hf.py --input_dir /path/to/downloaded/fuyu/weights/ --output_dir /output/path \
    --pt_model_path /path/to/fuyu_8b_release/iter_0001251/mp_rank_00/model_optim_rng.pt --ada_lib_path /path/to/adept-inference
```

For the chat model:

```bash
wget https://axtkn4xl5cip.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axtkn4xl5cip/b/adept-public-data/o/8b_chat_model_release.tar
tar -xvf 8b_chat_model_release.tar
```

Then, the model can be loaded via:

```py
from transformers import FuyuForCausalLM

model = FuyuForCausalLM.from_pretrained('/output/path')
```

Inputs need to be passed through a specific Processor to have the correct formats. A processor requires an image_processor and a tokenizer. Hence, inputs can be loaded via:

```py
import io

import requests
from PIL import Image
from transformers import AutoTokenizer
from transformers.models.fuyu.processing_fuyu import FuyuProcessor
from transformers.models.fuyu.image_processing_fuyu import FuyuImageProcessor

tokenizer = AutoTokenizer.from_pretrained('adept-hf-collab/fuyu-8b')
image_processor = FuyuImageProcessor()
processor = FuyuProcessor(image_processor=image_processor, tokenizer=tokenizer)

text_prompt = "Generate a coco-style caption.\\n"
bus_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content))

inputs_to_model = processor(text=text_prompt, images=bus_image_pil)
```

This model was contributed by [Molbap](https://huggingface.co/Molbap). The original code can be found [here](https://github.com/persimmon-ai-labs/adept-inference).

- Fuyu uses a `sentencepiece` based tokenizer, with a `Unigram` model. It supports bytefallback, which is only available in `tokenizers==0.14.0` for the fast tokenizer. The `LlamaTokenizer` is used as it is a standard wrapper around sentencepiece.

- The authors suggest using the following prompt for image captioning: `f"Generate a coco-style caption.\\n"`

## FuyuConfig

[[autodoc]] FuyuConfig

## FuyuForCausalLM

[[autodoc]] FuyuForCausalLM
    - forward

## FuyuImageProcessor

[[autodoc]] FuyuImageProcessor
    - __call__

## FuyuProcessor

[[autodoc]] FuyuProcessor
    - __call__
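Building on the processor example in the tips above, a rough generation sketch (untested here, and assuming the converted checkpoint at `/output/path` and that the processor returned PyTorch tensors; pass `return_tensors="pt"` to the processor if needed) could look like:

```py
# Generate a caption from the processed multimodal inputs, then decode only the new tokens.
outputs = model.generate(**inputs_to_model, max_new_tokens=16)
prompt_length = inputs_to_model["input_ids"].shape[1]
caption = processor.tokenizer.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)[0]
print(caption)
```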
transformers/docs/source/en/model_doc/fuyu.md/0
{ "file_path": "transformers/docs/source/en/model_doc/fuyu.md", "repo_id": "transformers", "token_count": 1657 }
251
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # I-BERT ## Overview The I-BERT model was proposed in [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney and Kurt Keutzer. It's a quantized version of RoBERTa running inference up to four times faster. The abstract from the paper is the following: *Transformer based models, like BERT and RoBERTa, have achieved state-of-the-art results in many Natural Language Processing tasks. However, their memory footprint, inference latency, and power consumption are prohibitive for efficient inference at the edge, and even at the data center. While quantization can be a viable solution for this, previous work on quantizing Transformer based models use floating-point arithmetic during inference, which cannot efficiently utilize integer-only logical units such as the recent Turing Tensor Cores, or traditional integer-only ARM processors. In this work, we propose I-BERT, a novel quantization scheme for Transformer based models that quantizes the entire inference with integer-only arithmetic. Based on lightweight integer-only approximation methods for nonlinear operations, e.g., GELU, Softmax, and Layer Normalization, I-BERT performs an end-to-end integer-only BERT inference without any floating point calculation. We evaluate our approach on GLUE downstream tasks using RoBERTa-Base/Large. We show that for both cases, I-BERT achieves similar (and slightly higher) accuracy as compared to the full-precision baseline. Furthermore, our preliminary implementation of I-BERT shows a speedup of 2.4 - 4.0x for INT8 inference on a T4 GPU system as compared to FP32 inference. The framework has been developed in PyTorch and has been open-sourced.* This model was contributed by [kssteven](https://huggingface.co/kssteven). The original code can be found [here](https://github.com/kssteven418/I-BERT). ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/masked_language_modeling) ## IBertConfig [[autodoc]] IBertConfig ## IBertModel [[autodoc]] IBertModel - forward ## IBertForMaskedLM [[autodoc]] IBertForMaskedLM - forward ## IBertForSequenceClassification [[autodoc]] IBertForSequenceClassification - forward ## IBertForMultipleChoice [[autodoc]] IBertForMultipleChoice - forward ## IBertForTokenClassification [[autodoc]] IBertForTokenClassification - forward ## IBertForQuestionAnswering [[autodoc]] IBertForQuestionAnswering - forward
transformers/docs/source/en/model_doc/ibert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/ibert.md", "repo_id": "transformers", "token_count": 947 }
252
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLaVa ## Overview LLaVa is an open-source chatbot trained by fine-tuning LlamA/Vicuna on GPT-generated multimodal instruction-following data. It is an auto-regressive language model, based on the transformer architecture. In other words, it is an multi-modal version of LLMs fine-tuned for chat / instructions. The LLaVa model was proposed in [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) and improved in [Improved Baselines with Visual Instruction Tuning](https://arxiv.org/pdf/2310.03744) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee. The abstract from the paper is the following: *Large multimodal models (LMM) have recently shown encouraging progress with visual instruction tuning. In this note, we show that the fully-connected vision-language cross-modal connector in LLaVA is surprisingly powerful and data-efficient. With simple modifications to LLaVA, namely, using CLIP-ViT-L-336px with an MLP projection and adding academic-task-oriented VQA data with simple response formatting prompts, we establish stronger baselines that achieve state-of-the-art across 11 benchmarks. Our final 13B checkpoint uses merely 1.2M publicly available data, and finishes full training in ∼1 day on a single 8-A100 node. We hope this can make state-of-the-art LMM research more accessible. Code and model will be publicly available* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llava_architecture.jpg" alt="drawing" width="600"/> <small> LLaVa architecture. Taken from the <a href="https://arxiv.org/abs/2304.08485">original paper.</a> </small> This model was contributed by [ArthurZ](https://huggingface.co/ArthurZ) and [ybelkada](https://huggingface.co/ybelkada). The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/main/llava). ## Usage tips - We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating. - Note the model has not been explicitly trained to process multiple images in the same prompt, although this is technically possible, you may experience inaccurate results. - For better results, we recommend users to prompt the model with the correct prompt format: ```bash "USER: <image>\n<prompt>ASSISTANT:" ``` For multiple turns conversation: ```bash "USER: <image>\n<prompt1>ASSISTANT: <answer1>USER: <prompt2>ASSISTANT: <answer2>USER: <prompt3>ASSISTANT:" ``` ### Using Flash Attention 2 Flash Attention 2 is an even faster, optimized version of the previous optimization, please refer to the [Flash Attention 2 section of performance docs](https://huggingface.co/docs/transformers/perf_infer_gpu_one). 
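Putting the recommended prompt format above into practice, here is a minimal single-turn generation sketch. The `llava-hf/llava-1.5-7b-hf` checkpoint name and the COCO image URL are illustrative placeholders; any compatible LLaVa checkpoint works the same way:

```python
import requests
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "llava-hf/llava-1.5-7b-hf"  # assumed checkpoint name
processor = AutoProcessor.from_pretrained(model_id)
# Optionally pass attn_implementation="flash_attention_2" if flash-attn is installed.
model = LlavaForConditionalGeneration.from_pretrained(model_id, device_map="auto")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Prompt follows the recommended single-turn format from the usage tips.
prompt = "USER: <image>\nWhat is shown in this image? ASSISTANT:"
inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))
```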
## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaVa.

<PipelineTag pipeline="image-to-text"/>

- A [Google Colab demo](https://colab.research.google.com/drive/1qsl6cd2c8gGtEW1xV5io7S8NHh-Cp1TV?usp=sharing) on how to run LLaVa on a free-tier Google Colab instance leveraging 4-bit inference.
- A [similar notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LLaVa/Inference_with_LLaVa_for_multimodal_generation.ipynb) showcasing batched inference. 🌎

## LlavaConfig

[[autodoc]] LlavaConfig

## LlavaProcessor

[[autodoc]] LlavaProcessor

## LlavaForConditionalGeneration

[[autodoc]] LlavaForConditionalGeneration
    - forward
transformers/docs/source/en/model_doc/llava.md/0
{ "file_path": "transformers/docs/source/en/model_doc/llava.md", "repo_id": "transformers", "token_count": 1228 }
253
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # mT5 <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=mt5"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-mt5-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/mt5-small-finetuned-arxiv-cs-finetuned-arxiv-cs-full"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The mT5 model was presented in [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. The abstract from the paper is the following: *The recent "Text-to-Text Transfer Transformer" (T5) leveraged a unified text-to-text format and scale to attain state-of-the-art results on a wide variety of English-language NLP tasks. In this paper, we introduce mT5, a multilingual variant of T5 that was pre-trained on a new Common Crawl-based dataset covering 101 languages. We detail the design and modified training of mT5 and demonstrate its state-of-the-art performance on many multilingual benchmarks. We also describe a simple technique to prevent "accidental translation" in the zero-shot setting, where a generative model chooses to (partially) translate its prediction into the wrong language. All of the code and model checkpoints used in this work are publicly available.* Note: mT5 was only pre-trained on [mC4](https://huggingface.co/datasets/mc4) excluding any supervised training. Therefore, this model has to be fine-tuned before it is usable on a downstream task, unlike the original T5 model. Since mT5 was pre-trained unsupervisedly, there's no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix. Google has released the following variants: - [google/mt5-small](https://huggingface.co/google/mt5-small) - [google/mt5-base](https://huggingface.co/google/mt5-base) - [google/mt5-large](https://huggingface.co/google/mt5-large) - [google/mt5-xl](https://huggingface.co/google/mt5-xl) - [google/mt5-xxl](https://huggingface.co/google/mt5-xxl). This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be found [here](https://github.com/google-research/multilingual-t5). ## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## MT5Config [[autodoc]] MT5Config ## MT5Tokenizer [[autodoc]] MT5Tokenizer See [`T5Tokenizer`] for all details. ## MT5TokenizerFast [[autodoc]] MT5TokenizerFast See [`T5TokenizerFast`] for all details. 
<frameworkcontent> <pt> ## MT5Model [[autodoc]] MT5Model ## MT5ForConditionalGeneration [[autodoc]] MT5ForConditionalGeneration ## MT5EncoderModel [[autodoc]] MT5EncoderModel ## MT5ForSequenceClassification [[autodoc]] MT5ForSequenceClassification ## MT5ForTokenClassification [[autodoc]] MT5ForTokenClassification ## MT5ForQuestionAnswering [[autodoc]] MT5ForQuestionAnswering </pt> <tf> ## TFMT5Model [[autodoc]] TFMT5Model ## TFMT5ForConditionalGeneration [[autodoc]] TFMT5ForConditionalGeneration ## TFMT5EncoderModel [[autodoc]] TFMT5EncoderModel </tf> <jax> ## FlaxMT5Model [[autodoc]] FlaxMT5Model ## FlaxMT5ForConditionalGeneration [[autodoc]] FlaxMT5ForConditionalGeneration ## FlaxMT5EncoderModel [[autodoc]] FlaxMT5EncoderModel </jax> </frameworkcontent>
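As a small illustration of the fine-tuning note above (no task prefix is required for single-task fine-tuning), a sketch of preparing one training example with the released `google/mt5-small` checkpoint might look like this; the sentence pair is purely illustrative:

```python
from transformers import AutoTokenizer, MT5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")

# No task prefix: the source text is tokenized as-is, the target goes to `text_target`.
batch = tokenizer(
    "Ein Beispielsatz auf Deutsch.",
    text_target="An example sentence in English.",
    return_tensors="pt",
)

outputs = model(**batch)  # `labels` are included by the tokenizer call above
print(outputs.loss)
```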
transformers/docs/source/en/model_doc/mt5.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mt5.md", "repo_id": "transformers", "token_count": 1400 }
254
<!--Copyright 2024 The Qwen Team and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Qwen2 ## Overview Qwen2 is the new model series of large language models from the Qwen team. Previously, we released the Qwen series, including Qwen-72B, Qwen-1.8B, Qwen-VL, Qwen-Audio, etc. ### Model Details Qwen2 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. ## Usage tips `Qwen2-7B-beta` and `Qwen2-7B-Chat-beta` can be found on the [Huggingface Hub](https://huggingface.co/Qwen) In the following, we demonstrate how to use `Qwen2-7B-Chat-beta` for the inference. Note that we have used the ChatML format for dialog, in this demo we show how to leverage `apply_chat_template` for this purpose. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> device = "cuda" # the device to load the model onto >>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-7B-Chat", device_map="auto") >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-7B-Chat") >>> prompt = "Give me a short introduction to large language model." >>> messages = [{"role": "user", "content": prompt}] >>> text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) >>> model_inputs = tokenizer([text], return_tensors="pt").to(device) >>> generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=512, do_sample=True) >>> generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)] >>> response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` ## Qwen2Config [[autodoc]] Qwen2Config ## Qwen2Tokenizer [[autodoc]] Qwen2Tokenizer - save_vocabulary ## Qwen2TokenizerFast [[autodoc]] Qwen2TokenizerFast ## Qwen2Model [[autodoc]] Qwen2Model - forward ## Qwen2ForCausalLM [[autodoc]] Qwen2ForCausalLM - forward ## Qwen2ForSequenceClassification [[autodoc]] Qwen2ForSequenceClassification - forward
transformers/docs/source/en/model_doc/qwen2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/qwen2.md", "repo_id": "transformers", "token_count": 918 }
255
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Swin Transformer ## Overview The Swin Transformer was proposed in [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo. The abstract from the paper is the following: *This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with \bold{S}hifted \bold{win}dows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/swin_transformer_architecture.png" alt="drawing" width="600"/> <small> Swin Transformer architecture. Taken from the <a href="https://arxiv.org/abs/2102.03334">original paper</a>.</small> This model was contributed by [novice03](https://huggingface.co/novice03). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). The original code can be found [here](https://github.com/microsoft/Swin-Transformer). ## Usage tips - Swin pads the inputs supporting any input height and width (if divisible by `32`). - Swin can be used as a *backbone*. When `output_hidden_states = True`, it will output both `hidden_states` and `reshaped_hidden_states`. The `reshaped_hidden_states` have a shape of `(batch, num_channels, height, width)` rather than `(batch_size, sequence_length, num_channels)`. 
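To see the backbone-style outputs mentioned in the tips above, here is a short sketch; the `microsoft/swin-tiny-patch4-window7-224` checkpoint and the COCO image URL are just examples:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, SwinModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
model = SwinModel.from_pretrained("microsoft/swin-tiny-patch4-window7-224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# (batch_size, sequence_length, num_channels) per stage
print(outputs.hidden_states[-1].shape)
# (batch_size, num_channels, height, width) per stage
print(outputs.reshaped_hidden_states[-1].shape)
```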
## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Swin Transformer. <PipelineTag pipeline="image-classification"/> - [`SwinForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) Besides that: - [`SwinForMaskedImageModeling`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## SwinConfig [[autodoc]] SwinConfig <frameworkcontent> <pt> ## SwinModel [[autodoc]] SwinModel - forward ## SwinForMaskedImageModeling [[autodoc]] SwinForMaskedImageModeling - forward ## SwinForImageClassification [[autodoc]] transformers.SwinForImageClassification - forward </pt> <tf> ## TFSwinModel [[autodoc]] TFSwinModel - call ## TFSwinForMaskedImageModeling [[autodoc]] TFSwinForMaskedImageModeling - call ## TFSwinForImageClassification [[autodoc]] transformers.TFSwinForImageClassification - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/swin.md/0
{ "file_path": "transformers/docs/source/en/model_doc/swin.md", "repo_id": "transformers", "token_count": 1394 }
256
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ViTMAE ## Overview The ViTMAE model was proposed in [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377v2) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. The paper shows that, by pre-training a Vision Transformer (ViT) to reconstruct pixel values for masked patches, one can get results after fine-tuning that outperform supervised pre-training. The abstract from the paper is the following: *This paper shows that masked autoencoders (MAE) are scalable self-supervised learners for computer vision. Our MAE approach is simple: we mask random patches of the input image and reconstruct the missing pixels. It is based on two core designs. First, we develop an asymmetric encoder-decoder architecture, with an encoder that operates only on the visible subset of patches (without mask tokens), along with a lightweight decoder that reconstructs the original image from the latent representation and mask tokens. Second, we find that masking a high proportion of the input image, e.g., 75%, yields a nontrivial and meaningful self-supervisory task. Coupling these two designs enables us to train large models efficiently and effectively: we accelerate training (by 3x or more) and improve accuracy. Our scalable approach allows for learning high-capacity models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pre-training and shows promising scaling behavior.* <img src="https://user-images.githubusercontent.com/11435359/146857310-f258c86c-fde6-48e8-9cee-badd2b21bd2c.png" alt="drawing" width="600"/> <small> MAE architecture. Taken from the <a href="https://arxiv.org/abs/2111.06377">original paper.</a> </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). TensorFlow version of the model was contributed by [sayakpaul](https://github.com/sayakpaul) and [ariG23498](https://github.com/ariG23498) (equal contribution). The original code can be found [here](https://github.com/facebookresearch/mae). ## Usage tips - MAE (masked auto encoding) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training objective is relatively simple: by masking a large portion (75%) of the image patches, the model must reconstruct raw pixel values. One can use [`ViTMAEForPreTraining`] for this purpose. - After pre-training, one "throws away" the decoder used to reconstruct pixels, and one uses the encoder for fine-tuning/linear probing. This means that after fine-tuning, one can directly plug in the weights into a [`ViTForImageClassification`]. - One can use [`ViTImageProcessor`] to prepare images for the model. 
See the code examples for more info. - Note that the encoder of MAE is only used to encode the visual patches. The encoded patches are then concatenated with mask tokens, which the decoder (which also consists of Transformer blocks) takes as input. Each mask token is a shared, learned vector that indicates the presence of a missing patch to be predicted. Fixed sin/cos position embeddings are added both to the input of the encoder and the decoder. - For a visual understanding of how MAEs work you can check out this [post](https://keras.io/examples/vision/masked_image_modeling/). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViTMAE. - [`ViTMAEForPreTraining`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining), allowing you to pre-train the model from scratch/further pre-train the model on custom data. - A notebook that illustrates how to visualize reconstructed pixel values with [`ViTMAEForPreTraining`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/ViTMAE/ViT_MAE_visualization_demo.ipynb). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ViTMAEConfig [[autodoc]] ViTMAEConfig <frameworkcontent> <pt> ## ViTMAEModel [[autodoc]] ViTMAEModel - forward ## ViTMAEForPreTraining [[autodoc]] transformers.ViTMAEForPreTraining - forward </pt> <tf> ## TFViTMAEModel [[autodoc]] TFViTMAEModel - call ## TFViTMAEForPreTraining [[autodoc]] transformers.TFViTMAEForPreTraining - call </tf> </frameworkcontent>
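As a minimal sketch of the pre-training objective described in the usage tips (mask a large portion of the patches and reconstruct their pixel values), using the public `facebook/vit-mae-base` checkpoint and an illustrative image URL:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ViTMAEForPreTraining

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("facebook/vit-mae-base")
model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.loss)          # reconstruction loss on the masked patches
print(outputs.mask.shape)    # (batch_size, num_patches), 1 indicates a masked patch
print(outputs.logits.shape)  # (batch_size, num_patches, patch_size**2 * num_channels)
```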
transformers/docs/source/en/model_doc/vit_mae.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vit_mae.md", "repo_id": "transformers", "token_count": 1492 }
257
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLM-RoBERTa <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=xlm-roberta"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm--roberta-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/xlm-roberta-base"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The XLM-RoBERTa model was proposed in [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. It is based on Facebook's RoBERTa model released in 2019. It is a large multi-lingual language model, trained on 2.5TB of filtered CommonCrawl data. The abstract from the paper is the following: *This paper shows that pretraining multilingual language models at scale leads to significant performance gains for a wide range of cross-lingual transfer tasks. We train a Transformer-based masked language model on one hundred languages, using more than two terabytes of filtered CommonCrawl data. Our model, dubbed XLM-R, significantly outperforms multilingual BERT (mBERT) on a variety of cross-lingual benchmarks, including +13.8% average accuracy on XNLI, +12.3% average F1 score on MLQA, and +2.1% average F1 score on NER. XLM-R performs particularly well on low-resource languages, improving 11.8% in XNLI accuracy for Swahili and 9.2% for Urdu over the previous XLM model. We also present a detailed empirical evaluation of the key factors that are required to achieve these gains, including the trade-offs between (1) positive transfer and capacity dilution and (2) the performance of high and low resource languages at scale. Finally, we show, for the first time, the possibility of multilingual modeling without sacrificing per-language performance; XLM-Ris very competitive with strong monolingual models on the GLUE and XNLI benchmarks. We will make XLM-R code, data, and models publicly available.* This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/xlmr). ## Usage tips - XLM-RoBERTa is a multilingual model trained on 100 different languages. Unlike some XLM multilingual models, it does not require `lang` tensors to understand which language is used, and should be able to determine the correct language from the input ids. - Uses RoBERTa tricks on the XLM approach, but does not use the translation language modeling objective. It only uses masked language modeling on sentences coming from one language. 
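As a quick illustration of the tips above — the same checkpoint handles many languages without any `lang` tensors — here is a small fill-mask sketch; `FacebookAI/xlm-roberta-base` is one of the public checkpoints:

```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="FacebookAI/xlm-roberta-base")

# The same model fills masks in different languages without a language id.
print(unmasker("Hello, I'm a <mask> model.")[0]["token_str"])
print(unmasker("Bonjour, je suis un modèle <mask>.")[0]["token_str"])
```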
## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with XLM-RoBERTa. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A blog post on how to [finetune XLM RoBERTa for multiclass classification with Habana Gaudi on AWS](https://www.philschmid.de/habana-distributed-training) - [`XLMRobertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb). - [`TFXLMRobertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb). - [`FlaxXLMRobertaForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb). - [Text classification](https://huggingface.co/docs/transformers/tasks/sequence_classification) chapter of the 🤗 Hugging Face Task Guides. - [Text classification task guide](../tasks/sequence_classification) <PipelineTag pipeline="token-classification"/> - [`XLMRobertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb). - [`TFXLMRobertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb). - [`FlaxXLMRobertaForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification). - [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course. - [Token classification task guide](../tasks/token_classification) <PipelineTag pipeline="text-generation"/> - [`XLMRobertaForCausalLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) chapter of the 🤗 Hugging Face Task Guides. 
- [Causal language modeling task guide](../tasks/language_modeling) <PipelineTag pipeline="fill-mask"/> - [`XLMRobertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFXLMRobertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [`FlaxXLMRobertaForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb). - [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course. - [Masked language modeling](../tasks/masked_language_modeling) <PipelineTag pipeline="question-answering"/> - [`XLMRobertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb). - [`TFXLMRobertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb). - [`FlaxXLMRobertaForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering). - [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course. - [Question answering task guide](../tasks/question_answering) **Multiple choice** - [`XLMRobertaForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb). - [`TFXLMRobertaForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb). - [Multiple choice task guide](../tasks/multiple_choice) 🚀 Deploy - A blog post on how to [Deploy Serverless XLM RoBERTa on AWS Lambda](https://www.philschmid.de/multilingual-serverless-xlm-roberta-with-huggingface). <Tip> This implementation is the same as RoBERTa. Refer to the [documentation of RoBERTa](roberta) for usage examples as well as the information relative to the inputs and outputs. 
</Tip> ## XLMRobertaConfig [[autodoc]] XLMRobertaConfig ## XLMRobertaTokenizer [[autodoc]] XLMRobertaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## XLMRobertaTokenizerFast [[autodoc]] XLMRobertaTokenizerFast <frameworkcontent> <pt> ## XLMRobertaModel [[autodoc]] XLMRobertaModel - forward ## XLMRobertaForCausalLM [[autodoc]] XLMRobertaForCausalLM - forward ## XLMRobertaForMaskedLM [[autodoc]] XLMRobertaForMaskedLM - forward ## XLMRobertaForSequenceClassification [[autodoc]] XLMRobertaForSequenceClassification - forward ## XLMRobertaForMultipleChoice [[autodoc]] XLMRobertaForMultipleChoice - forward ## XLMRobertaForTokenClassification [[autodoc]] XLMRobertaForTokenClassification - forward ## XLMRobertaForQuestionAnswering [[autodoc]] XLMRobertaForQuestionAnswering - forward </pt> <tf> ## TFXLMRobertaModel [[autodoc]] TFXLMRobertaModel - call ## TFXLMRobertaForCausalLM [[autodoc]] TFXLMRobertaForCausalLM - call ## TFXLMRobertaForMaskedLM [[autodoc]] TFXLMRobertaForMaskedLM - call ## TFXLMRobertaForSequenceClassification [[autodoc]] TFXLMRobertaForSequenceClassification - call ## TFXLMRobertaForMultipleChoice [[autodoc]] TFXLMRobertaForMultipleChoice - call ## TFXLMRobertaForTokenClassification [[autodoc]] TFXLMRobertaForTokenClassification - call ## TFXLMRobertaForQuestionAnswering [[autodoc]] TFXLMRobertaForQuestionAnswering - call </tf> <jax> ## FlaxXLMRobertaModel [[autodoc]] FlaxXLMRobertaModel - __call__ ## FlaxXLMRobertaForCausalLM [[autodoc]] FlaxXLMRobertaForCausalLM - __call__ ## FlaxXLMRobertaForMaskedLM [[autodoc]] FlaxXLMRobertaForMaskedLM - __call__ ## FlaxXLMRobertaForSequenceClassification [[autodoc]] FlaxXLMRobertaForSequenceClassification - __call__ ## FlaxXLMRobertaForMultipleChoice [[autodoc]] FlaxXLMRobertaForMultipleChoice - __call__ ## FlaxXLMRobertaForTokenClassification [[autodoc]] FlaxXLMRobertaForTokenClassification - __call__ ## FlaxXLMRobertaForQuestionAnswering [[autodoc]] FlaxXLMRobertaForQuestionAnswering - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/xlm-roberta.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-roberta.md", "repo_id": "transformers", "token_count": 3907 }
258
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Custom hardware for training The hardware you use to run model training and inference can have a big effect on performance. For a deep dive into GPUs make sure to check out Tim Dettmer's excellent [blog post](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/). Let's have a look at some practical advice for GPU setups. ## GPU When you train bigger models you have essentially three options: - bigger GPUs - more GPUs - more CPU and NVMe (offloaded to by [DeepSpeed-Infinity](main_classes/deepspeed#nvme-support)) Let's start at the case where you have a single GPU. ### Power and Cooling If you bought an expensive high end GPU make sure you give it the correct power and sufficient cooling. **Power**: Some high end consumer GPU cards have 2 and sometimes 3 PCI-E 8-Pin power sockets. Make sure you have as many independent 12V PCI-E 8-Pin cables plugged into the card as there are sockets. Do not use the 2 splits at one end of the same cable (also known as pigtail cable). That is if you have 2 sockets on the GPU, you want 2 PCI-E 8-Pin cables going from your PSU to the card and not one that has 2 PCI-E 8-Pin connectors at the end! You won't get the full performance out of your card otherwise. Each PCI-E 8-Pin power cable needs to be plugged into a 12V rail on the PSU side and can supply up to 150W of power. Some other cards may use a PCI-E 12-Pin connectors, and these can deliver up to 500-600W of power. Low end cards may use 6-Pin connectors, which supply up to 75W of power. Additionally you want the high-end PSU that has stable voltage. Some lower quality ones may not give the card the stable voltage it needs to function at its peak. And of course the PSU needs to have enough unused Watts to power the card. **Cooling**: When a GPU gets overheated it will start throttling down and will not deliver full performance and it can even shutdown if it gets too hot. It's hard to tell the exact best temperature to strive for when a GPU is heavily loaded, but probably anything under +80C is good, but lower is better - perhaps 70-75C is an excellent range to be in. The throttling down is likely to start at around 84-90C. But other than throttling performance a prolonged very high temperature is likely to reduce the lifespan of a GPU. Next let's have a look at one of the most important aspects when having multiple GPUs: connectivity. ### Multi-GPU Connectivity If you use multiple GPUs the way cards are inter-connected can have a huge impact on the total training time. If the GPUs are on the same physical node, you can run: ```bash nvidia-smi topo -m ``` and it will tell you how the GPUs are inter-connected. 
On a machine with two GPUs that are connected with NVLink, you will most likely see something like:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      NV2     0-23            N/A
GPU1    NV2      X      0-23            N/A
```

on a different machine w/o NVLink we may see:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      PHB     0-11            N/A
GPU1    PHB      X      0-11            N/A
```

The report includes this legend:

```
  X    = Self
  SYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
  NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
  PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
  PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
  PIX  = Connection traversing at most a single PCIe bridge
  NV#  = Connection traversing a bonded set of # NVLinks
```

So the first report `NV2` tells us the GPUs are interconnected with 2 NVLinks, and in the second report `PHB` we have a typical consumer-level PCIe+Bridge setup.

Check what type of connectivity you have on your setup. Some of these will make the communication between cards faster (e.g. NVLink), others slower (e.g. PHB).

Depending on the type of scalability solution used, the connectivity speed could have a major or a minor impact. If the GPUs need to sync rarely, as in DDP, the impact of a slower connection will be less significant. If the GPUs need to send messages to each other often, as in ZeRO-DP, then faster connectivity becomes super important to achieve faster training.

#### NVlink

[NVLink](https://en.wikipedia.org/wiki/NVLink) is a wire-based serial multi-lane near-range communications link developed by Nvidia.

Each new generation provides a faster bandwidth, e.g. here is a quote from [Nvidia Ampere GA102 GPU Architecture](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf):

> Third-Generation NVLink®
> GA102 GPUs utilize NVIDIA’s third-generation NVLink interface, which includes four x4 links,
> with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four
> links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth
> between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink.
> (Note that 3-Way and 4-Way SLI configurations are not supported.)

So the higher `X` you get in the report of `NVX` in the output of `nvidia-smi topo -m` the better. The generation will depend on your GPU architecture.

Let's compare the execution of an openai-community/gpt2 language model training over a small sample of wikitext.

The results are:

| NVlink | Time |
| -----  | ---: |
| Y      | 101s |
| N      | 131s |

You can see that NVLink completes the training ~23% faster. In the second benchmark we use `NCCL_P2P_DISABLE=1` to tell the GPUs not to use NVLink.
Here is the full benchmark code and outputs:

```bash
# DDP w/ NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69}

# DDP w/o NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69}
```

Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`)

Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`
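If you prefer to sanity-check connectivity from Python as well, here is a small sketch using PyTorch's CUDA utilities. It is complementary to `nvidia-smi topo -m`: it only reports whether peer-to-peer access between two GPUs is possible, not the link type or its bandwidth:

```python
import torch

# Check pairwise peer-to-peer (P2P) access between the visible GPUs.
# P2P access is what allows direct GPU-to-GPU transfers (e.g. over NVLink or PCIe).
num_gpus = torch.cuda.device_count()
for src in range(num_gpus):
    for dst in range(num_gpus):
        if src != dst:
            ok = torch.cuda.can_device_access_peer(src, dst)
            print(f"GPU{src} -> GPU{dst}: peer access {'available' if ok else 'unavailable'}")
```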
transformers/docs/source/en/perf_hardware.md/0
{ "file_path": "transformers/docs/source/en/perf_hardware.md", "repo_id": "transformers", "token_count": 2317 }
259
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Preprocess [[open-in-colab]] Before you can train a model on a dataset, it needs to be preprocessed into the expected model input format. Whether your data is text, images, or audio, they need to be converted and assembled into batches of tensors. 🤗 Transformers provides a set of preprocessing classes to help prepare your data for the model. In this tutorial, you'll learn that for: * Text, use a [Tokenizer](./main_classes/tokenizer) to convert text into a sequence of tokens, create a numerical representation of the tokens, and assemble them into tensors. * Speech and audio, use a [Feature extractor](./main_classes/feature_extractor) to extract sequential features from audio waveforms and convert them into tensors. * Image inputs use a [ImageProcessor](./main_classes/image_processor) to convert images into tensors. * Multimodal inputs, use a [Processor](./main_classes/processors) to combine a tokenizer and a feature extractor or image processor. <Tip> `AutoProcessor` **always** works and automatically chooses the correct class for the model you're using, whether you're using a tokenizer, image processor, feature extractor or processor. </Tip> Before you begin, install 🤗 Datasets so you can load some datasets to experiment with: ```bash pip install datasets ``` ## Natural Language Processing <Youtube id="Yffk5aydLzg"/> The main tool for preprocessing textual data is a [tokenizer](main_classes/tokenizer). A tokenizer splits text into *tokens* according to a set of rules. The tokens are converted into numbers and then tensors, which become the model inputs. Any additional inputs required by the model are added by the tokenizer. <Tip> If you plan on using a pretrained model, it's important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and uses the same corresponding tokens-to-index (usually referred to as the *vocab*) during pretraining. </Tip> Get started by loading a pretrained tokenizer with the [`AutoTokenizer.from_pretrained`] method. 
This downloads the *vocab* a model was pretrained with: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") ``` Then pass your text to the tokenizer: ```py >>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") >>> print(encoded_input) {'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` The tokenizer returns a dictionary with three important items: * [input_ids](glossary#input-ids) are the indices corresponding to each token in the sentence. * [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not. * [token_type_ids](glossary#token-type-ids) identifies which sequence a token belongs to when there is more than one sequence. Return your input by decoding the `input_ids`: ```py >>> tokenizer.decode(encoded_input["input_ids"]) '[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]' ``` As you can see, the tokenizer added two special tokens - `CLS` and `SEP` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer automatically adds them for you. If there are several sentences you want to preprocess, pass them as a list to the tokenizer: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_inputs = tokenizer(batch_sentences) >>> print(encoded_inputs) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]} ``` ### Pad Sentences aren't always the same length which can be an issue because tensors, the model inputs, need to have a uniform shape. Padding is a strategy for ensuring tensors are rectangular by adding a special *padding token* to shorter sentences. Set the `padding` parameter to `True` to pad the shorter sequences in the batch to match the longest sequence: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` The first and third sentences are now padded with `0`'s because they are shorter. 
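If you need every sequence padded to the same fixed length regardless of the batch contents (for example, to get static tensor shapes), you can instead pass `padding="max_length"` together with a `max_length` — a small sketch with an illustrative length of 20:

```py
>>> encoded_input = tokenizer(batch_sentences, padding="max_length", max_length=20)
>>> [len(ids) for ids in encoded_input["input_ids"]]
[20, 20, 20]
```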
### Truncation On the other end of the spectrum, sometimes a sequence may be too long for a model to handle. In this case, you'll need to truncate the sequence to a shorter length. Set the `truncation` parameter to `True` to truncate a sequence to the maximum length accepted by the model: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` <Tip> Check out the [Padding and truncation](./pad_truncation) concept guide to learn more different padding and truncation arguments. </Tip> ### Build tensors Finally, you want the tokenizer to return the actual tensors that get fed to the model. Set the `return_tensors` parameter to either `pt` for PyTorch, or `tf` for TensorFlow: <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> <Tip> Different pipelines support tokenizer arguments in their `__call__()` differently. 
`text-2-text-generation` pipelines support (i.e. pass on) only `truncation`. `text-generation` pipelines support `max_length`, `truncation`, `padding` and `add_special_tokens`. In `fill-mask` pipelines, tokenizer arguments can be passed in the `tokenizer_kwargs` argument (dictionary). </Tip> ## Audio For audio tasks, you'll need a [feature extractor](main_classes/feature_extractor) to prepare your dataset for the model. The feature extractor is designed to extract features from raw audio data, and convert them into tensors. Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a feature extractor with audio datasets: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` Access the first element of the `audio` column to take a look at the input. Calling the `audio` column automatically loads and resamples the audio file: ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` This returns three items: * `array` is the speech signal loaded - and potentially resampled - as a 1D array. * `path` points to the location of the audio file. * `sampling_rate` refers to how many data points in the speech signal are measured per second. For this tutorial, you'll use the [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model. Take a look at the model card, and you'll learn Wav2Vec2 is pretrained on 16kHz sampled speech audio. It is important your audio data's sampling rate matches the sampling rate of the dataset used to pretrain the model. If your data's sampling rate isn't the same, then you need to resample your data. 1. Use 🤗 Datasets' [`~datasets.Dataset.cast_column`] method to upsample the sampling rate to 16kHz: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` 2. Call the `audio` column again to resample the audio file: ```py >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` Next, load a feature extractor to normalize and pad the input. When padding textual data, a `0` is added for shorter sequences. The same idea applies to audio data. The feature extractor adds a `0` - interpreted as silence - to `array`. Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` Pass the audio `array` to the feature extractor. We also recommend adding the `sampling_rate` argument in the feature extractor in order to better debug any silent errors that may occur. 
```py >>> audio_input = [dataset[0]["audio"]["array"]] >>> feature_extractor(audio_input, sampling_rate=16000) {'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` Just like the tokenizer, you can apply padding or truncation to handle variable sequences in a batch. Take a look at the sequence length of these two audio samples: ```py >>> dataset[0]["audio"]["array"].shape (173398,) >>> dataset[1]["audio"]["array"].shape (106496,) ``` Create a function to preprocess the dataset so the audio samples are the same lengths. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs ``` Apply the `preprocess_function` to the first few examples in the dataset: ```py >>> processed_dataset = preprocess_function(dataset[:5]) ``` The sample lengths are now the same and match the specified maximum length. You can pass your processed dataset to the model now! ```py >>> processed_dataset["input_values"][0].shape (100000,) >>> processed_dataset["input_values"][1].shape (100000,) ``` ## Computer vision For computer vision tasks, you'll need an [image processor](main_classes/image_processor) to prepare your dataset for the model. Image preprocessing consists of several steps that convert images into the input expected by the model. These steps include but are not limited to resizing, normalizing, color channel correction, and converting images to tensors. <Tip> Image preprocessing often follows some form of image augmentation. Both image preprocessing and image augmentation transform image data, but they serve different purposes: * Image augmentation alters images in a way that can help prevent overfitting and increase the robustness of the model. You can get creative in how you augment your data - adjust brightness and colors, crop, rotate, resize, zoom, etc. However, be mindful not to change the meaning of the images with your augmentations. * Image preprocessing guarantees that the images match the model’s expected input format. When fine-tuning a computer vision model, images must be preprocessed exactly as when the model was initially trained. You can use any library you like for image augmentation. For image preprocessing, use the `ImageProcessor` associated with the model. </Tip> Load the [food101](https://huggingface.co/datasets/food101) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use an image processor with computer vision datasets: <Tip> Use 🤗 Datasets `split` parameter to only load a small sample from the training split since the dataset is quite large! 
</Tip> ```py >>> from datasets import load_dataset >>> dataset = load_dataset("food101", split="train[:100]") ``` Next, take a look at the image with 🤗 Datasets [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) feature: ```py >>> dataset[0]["image"] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png"/> </div> Load the image processor with [`AutoImageProcessor.from_pretrained`]: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` First, let's add some image augmentation. You can use any library you prefer, but in this tutorial, we'll use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module. If you're interested in using another data augmentation library, learn how in the [Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) or [Kornia notebooks](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb). 1. Here we use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain together a couple of transforms - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html). Note that for resizing, we can get the image size requirements from the `image_processor`. For some models, an exact height and width are expected, for others only the `shortest_edge` is defined. ```py >>> from torchvision.transforms import RandomResizedCrop, ColorJitter, Compose >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... ) >>> _transforms = Compose([RandomResizedCrop(size), ColorJitter(brightness=0.5, hue=0.5)]) ``` 2. The model accepts [`pixel_values`](model_doc/vision-encoder-decoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as its input. `ImageProcessor` can take care of normalizing the images, and generating appropriate tensors. Create a function that combines image augmentation and image preprocessing for a batch of images and generates `pixel_values`: ```py >>> def transforms(examples): ... images = [_transforms(img.convert("RGB")) for img in examples["image"]] ... examples["pixel_values"] = image_processor(images, do_resize=False, return_tensors="pt")["pixel_values"] ... return examples ``` <Tip> In the example above we set `do_resize=False` because we have already resized the images in the image augmentation transformation, and leveraged the `size` attribute from the appropriate `image_processor`. If you do not resize images during image augmentation, leave this parameter out. By default, `ImageProcessor` will handle the resizing. If you wish to normalize images as a part of the augmentation transformation, use the `image_processor.image_mean`, and `image_processor.image_std` values. </Tip> 3. Then use 🤗 Datasets[`~datasets.Dataset.set_transform`] to apply the transforms on the fly: ```py >>> dataset.set_transform(transforms) ``` 4. Now when you access the image, you'll notice the image processor has added `pixel_values`. 
You can pass your processed dataset to the model now!

```py
>>> dataset[0].keys()
```

Here is what the image looks like after the transforms are applied. The image has been randomly cropped and its color properties are different.

```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt

>>> img = dataset[0]["pixel_values"]
>>> plt.imshow(img.permute(1, 2, 0))
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png"/>
</div>

<Tip>

For tasks like object detection, semantic segmentation, instance segmentation, and panoptic segmentation, `ImageProcessor` offers post-processing methods. These methods convert the model's raw outputs into meaningful predictions such as bounding boxes or segmentation maps.

</Tip>

### Pad

In some cases, for instance, when fine-tuning [DETR](./model_doc/detr), the model applies scale augmentation at training time. This may cause images to be different sizes in a batch. You can use [`DetrImageProcessor.pad`] from [`DetrImageProcessor`] and define a custom `collate_fn` to batch images together.

```py
>>> def collate_fn(batch):
...     pixel_values = [item["pixel_values"] for item in batch]
...     encoding = image_processor.pad(pixel_values, return_tensors="pt")
...     labels = [item["labels"] for item in batch]
...     batch = {}
...     batch["pixel_values"] = encoding["pixel_values"]
...     batch["pixel_mask"] = encoding["pixel_mask"]
...     batch["labels"] = labels
...     return batch
```

## Multimodal

For tasks involving multimodal inputs, you'll need a [processor](main_classes/processors) to prepare your dataset for the model. A processor couples together two processing objects, such as a tokenizer and a feature extractor.

Load the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a processor for automatic speech recognition (ASR):

```py
>>> from datasets import load_dataset

>>> lj_speech = load_dataset("lj_speech", split="train")
```

For ASR, you're mainly focused on `audio` and `text` so you can remove the other columns:

```py
>>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"])
```

Now take a look at the `audio` and `text` columns:

```py
>>> lj_speech[0]["audio"]
{'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ...,
         7.3242188e-04,  2.1362305e-04,  6.1035156e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav',
 'sampling_rate': 22050}

>>> lj_speech[0]["text"]
'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition'
```

Remember that you should always [resample](preprocessing#audio) your audio dataset's sampling rate to match the sampling rate of the dataset used to pretrain a model!

```py
>>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))
```

Load a processor with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
```

1. Create a function to process the audio data contained in `array` to `input_values`, and tokenize `text` to `labels`. These are the inputs to the model:

```py
>>> def prepare_dataset(example):
...     audio = example["audio"]

...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))

...     return example
```

2. Apply the `prepare_dataset` function to a sample:

```py
>>> prepare_dataset(lj_speech[0])
```

The processor has now added `input_values` and `labels`, and the sampling rate has also been correctly downsampled to 16kHz. You can pass your processed dataset to the model now!
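To preprocess every example rather than a single sample, map `prepare_dataset` over the dataset with 🤗 Datasets. This is a minimal sketch — which columns you remove depends on what your model expects:

```py
>>> lj_speech = lj_speech.map(prepare_dataset, remove_columns=["audio", "text"])
```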
transformers/docs/source/en/preprocessing.md/0
{ "file_path": "transformers/docs/source/en/preprocessing.md", "repo_id": "transformers", "token_count": 8689 }
260
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Zero-shot image classification

[[open-in-colab]]

Zero-shot image classification is a task that involves classifying images into different categories using a model that was not explicitly trained on data containing labeled examples from those specific categories.

Traditionally, image classification requires training a model on a specific set of labeled images, and this model learns to "map" certain image features to labels. When such a model needs to be used for a classification task that introduces a new set of labels, fine-tuning is required to "recalibrate" the model.

In contrast, zero-shot or open vocabulary image classification models are typically multi-modal models that have been trained on a large dataset of images and associated descriptions. These models learn aligned vision-language representations that can be used for many downstream tasks, including zero-shot image classification.

This is a more flexible approach to image classification that allows models to generalize to new and unseen categories without the need for additional training data, and enables users to query images with free-form text descriptions of their target objects.

In this guide you'll learn how to:

* create a zero-shot image classification pipeline
* run zero-shot image classification inference by hand

Before you begin, make sure you have all the necessary libraries installed:

```bash
pip install -q transformers
```

## Zero-shot image classification pipeline

The simplest way to try out inference with a model supporting zero-shot image classification is to use the corresponding [`pipeline`]. Instantiate a pipeline from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads):

```python
>>> from transformers import pipeline

>>> checkpoint = "openai/clip-vit-large-patch14"
>>> detector = pipeline(model=checkpoint, task="zero-shot-image-classification")
```

Next, choose an image you'd like to classify.

```py
>>> from PIL import Image
>>> import requests

>>> url = "https://unsplash.com/photos/g8oS8-82DxI/download?ixid=MnwxMjA3fDB8MXx0b3BpY3x8SnBnNktpZGwtSGt8fHx8fDJ8fDE2NzgxMDYwODc&force=true&w=640"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/owl.jpg" alt="Photo of an owl"/>
</div>

Pass the image and the candidate object labels to the pipeline. Here we pass the image directly; other suitable options include a local path to an image or an image url. The candidate labels can be simple words like in this example, or more descriptive.
```py >>> predictions = detector(image, candidate_labels=["fox", "bear", "seagull", "owl"]) >>> predictions [{'score': 0.9996670484542847, 'label': 'owl'}, {'score': 0.000199399160919711, 'label': 'seagull'}, {'score': 7.392891711788252e-05, 'label': 'fox'}, {'score': 5.96074532950297e-05, 'label': 'bear'}] ``` ## Zero-shot image classification by hand Now that you've seen how to use the zero-shot image classification pipeline, let's take a look how you can run zero-shot image classification manually. Start by loading the model and associated processor from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads). Here we'll use the same checkpoint as before: ```py >>> from transformers import AutoProcessor, AutoModelForZeroShotImageClassification >>> model = AutoModelForZeroShotImageClassification.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) ``` Let's take a different image to switch things up. ```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/xBRQfR2bqNI/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjc4Mzg4ODEx&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg" alt="Photo of a car"/> </div> Use the processor to prepare the inputs for the model. The processor combines an image processor that prepares the image for the model by resizing and normalizing it, and a tokenizer that takes care of the text inputs. ```py >>> candidate_labels = ["tree", "car", "bike", "cat"] >>> inputs = processor(images=image, text=candidate_labels, return_tensors="pt", padding=True) ``` Pass the inputs through the model, and post-process the results: ```py >>> import torch >>> with torch.no_grad(): ... outputs = model(**inputs) >>> logits = outputs.logits_per_image[0] >>> probs = logits.softmax(dim=-1).numpy() >>> scores = probs.tolist() >>> result = [ ... {"score": score, "label": candidate_label} ... for score, candidate_label in sorted(zip(probs, candidate_labels), key=lambda x: -x[0]) ... ] >>> result [{'score': 0.998572, 'label': 'car'}, {'score': 0.0010570387, 'label': 'bike'}, {'score': 0.0003393686, 'label': 'tree'}, {'score': 3.1572064e-05, 'label': 'cat'}] ```
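CLIP-style checkpoints are often queried with a short prompt template rather than a bare label. The snippet below is a minimal sketch of that idea and reuses the `model`, `processor`, and `image` loaded above; the template string itself is only an illustrative choice, not something required by the checkpoint:

```py
>>> candidate_labels = ["tree", "car", "bike", "cat"]
>>> prompts = [f"a photo of a {label}" for label in candidate_labels]

>>> inputs = processor(images=image, text=prompts, return_tensors="pt", padding=True)

>>> with torch.no_grad():
...     outputs = model(**inputs)

>>> probs = outputs.logits_per_image[0].softmax(dim=-1)
>>> sorted(zip(probs.tolist(), candidate_labels), reverse=True)[0]  # highest-scoring label
```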
transformers/docs/source/en/tasks/zero_shot_image_classification.md/0
{ "file_path": "transformers/docs/source/en/tasks/zero_shot_image_classification.md", "repo_id": "transformers", "token_count": 1757 }
261
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Mecanismos de atención La mayoría de los modelos transformers utilizan atención completa, en el sentido de que la matriz de atención es cuadrada. Esto puede ser un gran cuello de botella computacional cuando tienes textos largos. `Longformer` y `reformer` son modelos que intentan ser más eficientes y utilizan una versión dispersa de la matriz de atención para acelerar el entrenamiento. ## Atención LSH [Reformer](https://huggingface.co/docs/transformers/model_doc/reformer) utiliza atención LSH. En el softmax(QK^t), solo los elementos más grandes (en la dimensión softmax) de la matriz QK^t van a dar contribuciones útiles. Entonces, para cada consulta q en Q, podemos considerar solo las claves k en K que estén cerca de q. Se utiliza una función hash para determinar si q y k están cerca. La máscara de atención se modifica para enmascarar el token actual (excepto en la primera posición), porque dará una consulta y una clave iguales (entonces muy similares entre sí). Dado que el hash puede ser un poco aleatorio, en la práctica se utilizan varias funciones hash (determinadas por un parámetro n_rounds) y luego se promedian juntas. ## Atención local [Longformer](https://huggingface.co/docs/transformers/model_doc/longformer) utiliza atención local: a menudo, el contexto local (por ejemplo, ¿cuáles son los dos tokens a la izquierda y a la derecha?) es suficiente para tomar acción para un token dado. Además, apilando capas de atención que tienen una ventana pequeña, la última capa tendrá un campo receptivo mayor que solamente los tokens en la ventana, lo que les permite construir una representación de toda la oración. Algunos tokens de entrada preseleccionados también reciben atención global: para esos pocos tokens, la matriz de atención puede acceder a todos los tokens y este proceso es simétrico: todos los demás tokens tienen acceso a esos tokens específicos (además de los que están en su ventana local). Esto se muestra en la Figura 2d del artículo, el cual se puede apreciar un ejemplo de una máscara de atención: <div class="flex justify-center"> <img scale="50 %" align="center" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/local_attention_mask.png"/> </div> El uso de dichas matrices de atención con menos parámetros permite que el modelo tenga entradas con una longitud de secuencia mayor. ## Otros trucos ### Codificación posicional axial [Reformer](https://huggingface.co/docs/transformers/model_doc/reformer) utiliza codificación posicional axial: en los modelos transformers tradicionales, la codificación posicional E es una matriz de tamaño \\(l\\) por \\(d\\), donde \\(l\\) es la longitud de la secuencia y \\(d\\) es la dimensión del estado oculto. 
Si tienes textos muy extensos, esta matriz puede ser enorme y ocupar demasiado espacio en la GPU. Para aliviar eso, las codificaciones posicionales axiales consisten en factorizar esa gran matriz E en dos matrices más pequeñas E1 y E2, con dimensiones \\(l_{1} \times d_{1}\\) y \\(l_{2} \times d_{2}\\), tal que \\(l_{1} \times l_{2} = l\\) y \\(d_{1} + d_{2} = d\\) (con el producto de las longitudes, esto termina siendo mucho más pequeño). La incrustación (embedding) para el paso de tiempo \\(j\\) en E se obtiene concatenando las incrustaciones para el paso de tiempo \\(j \% l1\\) en E1 y \\(j // l1\\) en E2.
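A modo de ilustración, el siguiente fragmento es un esquema mínimo en PyTorch de esa factorización (las dimensiones y los nombres son hipotéticos y no corresponden a la implementación real de Reformer):

```python
import torch

# Dimensiones hipotéticas: l = l1 * l2 y d = d1 + d2
l1, l2, d1, d2 = 128, 512, 256, 768  # l = 65536, d = 1024

E1 = torch.nn.Embedding(l1, d1)  # primera matriz factorizada
E2 = torch.nn.Embedding(l2, d2)  # segunda matriz factorizada


def axial_position_embedding(j: int) -> torch.Tensor:
    """Incrustación posicional del paso de tiempo j: concatena E1[j % l1] y E2[j // l1]."""
    fila = torch.tensor(j % l1)
    columna = torch.tensor(j // l1)
    return torch.cat([E1(fila), E2(columna)], dim=-1)  # dimensión final: d1 + d2 = d


print(axial_position_embedding(10000).shape)  # torch.Size([1024])
```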
transformers/docs/source/es/attention.md/0
{ "file_path": "transformers/docs/source/es/attention.md", "repo_id": "transformers", "token_count": 1396 }
262
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Rendimiento y Escalabilidad Entrenar modelos grandes de transformadores y desplegarlos en producción presenta varios desafíos. Durante el entrenamiento, el modelo puede requerir más memoria de GPU de la disponible o mostrar una velocidad de entrenamiento lenta. En la fase de implementación, el modelo puede tener dificultades para manejar el rendimiento necesario en un entorno de producción. Esta documentación tiene como objetivo ayudarte a superar estos desafíos y encontrar la configuración óptima para tu caso de uso. Las guías están divididas en secciones de entrenamiento e inferencia, ya que cada una presenta diferentes desafíos y soluciones. Dentro de cada sección, encontrarás guías separadas para diferentes configuraciones de hardware, como GPU única vs. multi-GPU para el entrenamiento o CPU vs. GPU para la inferencia. Utiliza este documento como punto de partida para navegar hacia los métodos que se ajusten a tu escenario. ## Entrenamiento Entrenar modelos grandes de transformadores de manera eficiente requiere un acelerador como una GPU o TPU. El caso más común es cuando tienes una GPU única. Los métodos que puedes aplicar para mejorar la eficiencia de entrenamiento en una GPU única también se aplican a otras configuraciones, como múltiples GPU. Sin embargo, también existen técnicas específicas para entrenamiento con múltiples GPU o CPU, las cuales cubrimos en secciones separadas. * [Métodos y herramientas para un entrenamiento eficiente en una sola GPU](https://huggingface.co/docs/transformers/perf_train_gpu_one): comienza aquí para aprender enfoques comunes que pueden ayudar a optimizar la utilización de memoria de la GPU, acelerar el entrenamiento o ambas cosas. * [Sección de entrenamiento con varias GPU](https://huggingface.co/docs/transformers/perf_train_gpu_many): explora esta sección para conocer métodos de optimización adicionales que se aplican a configuraciones con varias GPU, como paralelismo de datos, tensores y canalizaciones. * [Sección de entrenamiento en CPU](https://huggingface.co/docs/transformers/perf_train_cpu): aprende sobre entrenamiento de precisión mixta en CPU. * [Entrenamiento eficiente en múltiples CPUs](https://huggingface.co/docs/transformers/perf_train_cpu_many): aprende sobre el entrenamiento distribuido en CPU. * [Entrenamiento en TPU con TensorFlow](https://huggingface.co/docs/transformers/perf_train_tpu_tf): si eres nuevo en TPUs, consulta esta sección para obtener una introducción basada en opiniones sobre el entrenamiento en TPUs y el uso de XLA. * [Hardware personalizado para el entrenamiento](https://huggingface.co/docs/transformers/perf_hardware): encuentra consejos y trucos al construir tu propia plataforma de aprendizaje profundo. 
* [Búsqueda de hiperparámetros utilizando la API del Entrenador](https://huggingface.co/docs/transformers/hpo_train) ## Inferencia Realizar inferencias eficientes con modelos grandes en un entorno de producción puede ser tan desafiante como entrenarlos. En las siguientes secciones, describimos los pasos para ejecutar inferencias en CPU y configuraciones con GPU única/múltiple. * [Inferencia en una sola CPU](https://huggingface.co/docs/transformers/perf_infer_cpu) * [Inferencia en una sola GPU](https://huggingface.co/docs/transformers/perf_infer_gpu_one) * [Inferencia con múltiples GPU](https://huggingface.co/docs/transformers/perf_infer_gpu_one) * [Integración de XLA para modelos de TensorFlow](https://huggingface.co/docs/transformers/tf_xla) ## Entrenamiento e Inferencia Aquí encontrarás técnicas, consejos y trucos que aplican tanto si estás entrenando un modelo como si estás ejecutando inferencias con él. * [Instanciar un modelo grande](https://huggingface.co/docs/transformers/big_models) * [Solución de problemas de rendimiento](https://huggingface.co/docs/transformers/debugging) ## Contribuir Este documento está lejos de estar completo y aún se deben agregar muchas cosas, así que si tienes adiciones o correcciones que hacer, no dudes en abrir un PR. Si no estás seguro, inicia un Issue y podemos discutir los detalles allí. Cuando hagas contribuciones que indiquen que A es mejor que B, intenta incluir un benchmark reproducible y/o un enlace a la fuente de esa información (a menos que provenga directamente de ti).
transformers/docs/source/es/performance.md/0
{ "file_path": "transformers/docs/source/es/performance.md", "repo_id": "transformers", "token_count": 1751 }
263
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Respuesta a preguntas <Youtube id="ajPx5LwJD-I"/> La respuesta a preguntas devuelve una respuesta a partir de una pregunta dada. Existen dos formas comunes de responder preguntas: - Extractiva: extraer la respuesta a partir del contexto dado. - Abstractiva: generar una respuesta que responda correctamente la pregunta a partir del contexto dado. Esta guía te mostrará como hacer fine-tuning de [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) en el dataset [SQuAD](https://huggingface.co/datasets/squad) para responder preguntas de forma extractiva. <Tip> Revisa la [página de la tarea](https://huggingface.co/tasks/question-answering) de responder preguntas para tener más información sobre otras formas de responder preguntas y los modelos, datasets y métricas asociadas. </Tip> ## Carga el dataset SQuAD Carga el dataset SQuAD con la biblioteca 🤗 Datasets: ```py >>> from datasets import load_dataset >>> squad = load_dataset("squad") ``` Ahora, échale un vistazo a una muestra: ```py >>> squad["train"][0] {'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']}, 'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.', 'id': '5733be284776f41900661182', 'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?', 'title': 'University_of_Notre_Dame' } ``` El campo `answers` es un diccionario que contiene la posición inicial de la respuesta y el `texto` de la respuesta. ## Preprocesamiento <Youtube id="qgaM0weJHpA"/> Carga el tokenizer de DistilBERT para procesar los campos `question` (pregunta) y `context` (contexto): ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` Hay algunos pasos de preprocesamiento específicos para la tarea de respuesta a preguntas que debes tener en cuenta: 1. Algunos ejemplos en un dataset pueden tener un contexto que supera la longitud máxima de entrada de un modelo. Trunca solamente el contexto asignándole el valor `"only_second"` al parámetro `truncation`. 2. 
A continuación, mapea las posiciones de inicio y fin de la respuesta al contexto original asignándole el valor `True` al parámetro `return_offsets_mapping`. 3. Una vez tengas el mapeo, puedes encontrar los tokens de inicio y fin de la respuesta. Usa el método [`sequence_ids`](https://huggingface.co/docs/tokenizers/python/latest/api/reference.html#tokenizers.Encoding.sequence_ids) para encontrar qué parte de la lista de tokens desplazados corresponde a la pregunta y cuál corresponde al contexto. A continuación puedes ver como se crea una función para truncar y mapear los tokens de inicio y fin de la respuesta al `context`: ```py >>> def preprocess_function(examples): ... questions = [q.strip() for q in examples["question"]] ... inputs = tokenizer( ... questions, ... examples["context"], ... max_length=384, ... truncation="only_second", ... return_offsets_mapping=True, ... padding="max_length", ... ) ... offset_mapping = inputs.pop("offset_mapping") ... answers = examples["answers"] ... start_positions = [] ... end_positions = [] ... for i, offset in enumerate(offset_mapping): ... answer = answers[i] ... start_char = answer["answer_start"][0] ... end_char = answer["answer_start"][0] + len(answer["text"][0]) ... sequence_ids = inputs.sequence_ids(i) ... # Encuentra el inicio y el fin del contexto ... idx = 0 ... while sequence_ids[idx] != 1: ... idx += 1 ... context_start = idx ... while sequence_ids[idx] == 1: ... idx += 1 ... context_end = idx - 1 ... # Si la respuesta entera no está dentro del contexto, etiquétala como (0, 0) ... if offset[context_start][0] > end_char or offset[context_end][1] < start_char: ... start_positions.append(0) ... end_positions.append(0) ... else: ... # De lo contrario, esta es la posición de los tokens de inicio y fin ... idx = context_start ... while idx <= context_end and offset[idx][0] <= start_char: ... idx += 1 ... start_positions.append(idx - 1) ... idx = context_end ... while idx >= context_start and offset[idx][1] >= end_char: ... idx -= 1 ... end_positions.append(idx + 1) ... inputs["start_positions"] = start_positions ... inputs["end_positions"] = end_positions ... return inputs ``` Usa la función [`~datasets.Dataset.map`] de 🤗 Datasets para aplicarle la función de preprocesamiento al dataset entero. Puedes acelerar la función `map` haciendo `batched=True` para procesar varios elementos del dataset a la vez. Quita las columnas que no necesites: ```py >>> tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names) ``` Usa el [`DefaultDataCollator`] para crear un lote de ejemplos. A diferencia de los otros collators de datos en 🤗 Transformers, el `DefaultDataCollator` no aplica ningún procesamiento adicional (como el rellenado). <frameworkcontent> <pt> ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` </pt> <tf> ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator(return_tensors="tf") ``` </tf> </frameworkcontent> ## Entrenamiento <frameworkcontent> <pt> Carga el modelo DistilBERT con [`AutoModelForQuestionAnswering`]: ```py >>> from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer >>> model = AutoModelForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> Para familiarizarte con el fine-tuning con [`Trainer`], ¡mira el tutorial básico [aquí](../training#finetune-with-trainer)! </Tip> En este punto, solo quedan tres pasos: 1. 
Definir tus hiperparámetros de entrenamiento en [`TrainingArguments`]. 2. Pasarle los argumentos del entrenamiento al [`Trainer`] junto con el modelo, el dataset, el tokenizer y el collator de datos. 3. Invocar el método [`~Trainer.train`] para realizar el fine-tuning del modelo. ```py >>> training_args = TrainingArguments( ... output_dir="./results", ... evaluation_strategy="epoch", ... learning_rate=2e-5, ... per_device_train_batch_size=16, ... per_device_eval_batch_size=16, ... num_train_epochs=3, ... weight_decay=0.01, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=tokenized_squad["train"], ... eval_dataset=tokenized_squad["validation"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... ) >>> trainer.train() ``` </pt> <tf> Para realizar el fine-tuning de un modelo en TensorFlow, primero convierte tus datasets al formato `tf.data.Dataset` con el método [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py >>> tf_train_set = model.prepare_tf_dataset( ... tokenized_squad["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_validation_set = model.prepare_tf_dataset( ... tokenized_squad["validation"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` <Tip> Para familiarizarte con el fine-tuning con Keras, ¡mira el tutorial básico [aquí](training#finetune-with-keras)! </Tip> Prepara una función de optimización, un programa para la tasa de aprendizaje y algunos hiperparámetros de entrenamiento: ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_epochs = 2 >>> total_train_steps = (len(tokenized_squad["train"]) // batch_size) * num_epochs >>> optimizer, schedule = create_optimizer( ... init_lr=2e-5, ... num_warmup_steps=0, ... num_train_steps=total_train_steps, ... ) ``` Carga el modelo DistilBERT con [`TFAutoModelForQuestionAnswering`]: ```py >>> from transformers import TFAutoModelForQuestionAnswering >>> model = TFAutoModelForQuestionAnswering("distilbert/distilbert-base-uncased") ``` Configura el modelo para entrenarlo con [`compile`](https://keras.io/api/models/model_training_apis/#compile-method): ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) ``` Invoca el método [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para realizar el fine-tuning del modelo: ```py >>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3) ``` </tf> </frameworkcontent> <Tip> Para un ejemplo con mayor profundidad de cómo hacer fine-tuning a un modelo para responder preguntas, échale un vistazo al [cuaderno de PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb) o al [cuaderno de TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb) correspondiente. </Tip>
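Una vez ajustado el modelo, puedes probarlo rápidamente con el pipeline de `question-answering`. El siguiente fragmento es solo un esquema mínimo que reutiliza el `model` y el `tokenizer` de la variante de PyTorch anterior (la pregunta y el contexto se toman del propio dataset SQuAD a modo de ejemplo):

```py
>>> from transformers import pipeline

>>> question_answerer = pipeline("question-answering", model=model, tokenizer=tokenizer)
>>> question_answerer(
...     question=squad["validation"][0]["question"],
...     context=squad["validation"][0]["context"],
... )
```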
transformers/docs/source/es/tasks/question_answering.md/0
{ "file_path": "transformers/docs/source/es/tasks/question_answering.md", "repo_id": "transformers", "token_count": 3911 }
264
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Tour rapido - local: installation title: Installazione title: Iniziare - sections: - local: pipeline_tutorial title: Pipeline per l'inferenza - local: autoclass_tutorial title: Carica istanze pre-allenate con AutoClass - local: preprocessing title: Preprocess - local: training title: Fine-tuning di un modello pre-addestrato - local: accelerate title: Allenamento distribuito con 🤗 Accelerate - local: model_sharing title: Condividere un modello title: Esercitazione - sections: - local: create_a_model title: Crea un'architettura personalizzata - local: custom_models title: Condividere modelli personalizzati - local: run_scripts title: Addestramento con script - local: multilingual title: Modelli multilingua per l'inferenza - local: converting_tensorflow_models title: Convertire modelli tensorflow - local: serialization title: Esporta modelli Transformers - local: perf_train_cpu title: Addestramento efficiente su CPU - local: perf_train_cpu_many title: Addestramento efficiente su multiple CPU - local: perf_train_tpu title: Addestramento su TPU - local: perf_train_special title: Addestramento su Hardware Specializzato - local: perf_infer_cpu title: Inferenza Efficiente su CPU - local: perf_infer_gpu_one title: Inferenza su una GPU - local: perf_infer_gpu_many title: Inferenza Efficiente su GPU Multiple - local: perf_infer_special title: Inferenza su Hardware Specializzato - local: big_models title: Istanziare un big model - local: migration title: Passaggio da pacchetti precedenti - local: debugging title: Debugging title: Guide pratiche - sections: - local: add_new_pipeline title: Come aggiungere una pipeline a 🤗 Transformers? - local: add_new_model title: Come aggiungere un modello a 🤗 Transformers? - local: perf_hardware title: Hardware ottimizzato per l'addestramento - local: community title: Risorse della comunità - local: pr_checks title: Controlli su una Pull Request title: Guide How-to
transformers/docs/source/it/_toctree.yml/0
{ "file_path": "transformers/docs/source/it/_toctree.yml", "repo_id": "transformers", "token_count": 771 }
265
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Hardware ottimizzato per l'addestramento L'hardware utilizzato per eseguire l'addestramento del modello e l'inferenza può avere un grande effetto sulle prestazioni. Per un analisi approfondita delle GPUs, assicurati di dare un'occhiata all'eccellente [blog post](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/) di Tim Dettmer. Diamo un'occhiata ad alcuni consigli pratici per la configurazione della GPU. ## GPU Quando si addestrano modelli più grandi ci sono essenzialmente tre opzioni: - GPUs piu' grandi - Piu' GPUs - Piu' CPU e piu' NVMe (scaricato da [DeepSpeed-Infinity](main_classes/deepspeed#nvme-support)) Iniziamo dal caso in cui ci sia una singola GPU. ### Potenza e Raffreddamento Se hai acquistato una costosa GPU di fascia alta, assicurati di darle la potenza corretta e un raffreddamento sufficiente. **Potenza**: Alcune schede GPU consumer di fascia alta hanno 2 e talvolta 3 prese di alimentazione PCI-E a 8 pin. Assicurati di avere tanti cavi PCI-E a 8 pin indipendenti da 12 V collegati alla scheda quante sono le prese. Non utilizzare le 2 fessure a un'estremità dello stesso cavo (noto anche come cavo a spirale). Cioè se hai 2 prese sulla GPU, vuoi 2 cavi PCI-E a 8 pin che vanno dall'alimentatore alla scheda e non uno che abbia 2 connettori PCI-E a 8 pin alla fine! In caso contrario, non otterrai tutte le prestazioni ufficiali. Ciascun cavo di alimentazione PCI-E a 8 pin deve essere collegato a una guida da 12 V sul lato dell'alimentatore e può fornire fino a 150 W di potenza. Alcune altre schede possono utilizzare connettori PCI-E a 12 pin e questi possono fornire fino a 500-600 W di potenza. Le schede di fascia bassa possono utilizzare connettori a 6 pin, che forniscono fino a 75 W di potenza. Inoltre vuoi un alimentatore (PSU) di fascia alta che abbia una tensione stabile. Alcuni PSU di qualità inferiore potrebbero non fornire alla scheda la tensione stabile di cui ha bisogno per funzionare al massimo. E ovviamente l'alimentatore deve avere abbastanza Watt inutilizzati per alimentare la scheda. **Raffreddamento**: Quando una GPU si surriscalda, inizierà a rallentare e non fornirà le prestazioni mssimali e potrebbe persino spegnersi se diventasse troppo calda. È difficile dire l'esatta temperatura migliore a cui aspirare quando una GPU è molto caricata, ma probabilmente qualsiasi cosa al di sotto di +80°C va bene, ma più bassa è meglio - forse 70-75°C è un intervallo eccellente in cui trovarsi. È probabile che il rallentamento inizi a circa 84-90°C. Ma oltre alla limitazione delle prestazioni, una temperatura molto elevata prolungata è probabile che riduca la durata di una GPU. Diamo quindi un'occhiata a uno degli aspetti più importanti quando si hanno più GPU: la connettività. 
### Connettività multi-GPU Se utilizzi più GPU, il modo in cui le schede sono interconnesse può avere un enorme impatto sul tempo totale di allenamento. Se le GPU si trovano sullo stesso nodo fisico, puoi eseguire: ```bash nvidia-smi topo -m ``` e ti dirà come sono interconnesse le GPU. Su una macchina con doppia GPU e collegata a NVLink, molto probabilmente vedrai qualcosa del tipo: ``` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X NV2 0-23 N/A GPU1 NV2 X 0-23 N/A ``` su una macchina diversa senza NVLink potremmo vedere: ``` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X PHB 0-11 N/A GPU1 PHB X 0-11 N/A ``` Il rapporto include questa legenda: ``` X = Self SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI) NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU) PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge) PIX = Connection traversing at most a single PCIe bridge NV# = Connection traversing a bonded set of # NVLinks ``` Quindi il primo rapporto `NV2` ci dice che le GPU sono interconnesse con 2 NVLinks e nel secondo report `PHB` abbiamo una tipica configurazione PCIe+Bridge a livello di consumatore. Controlla che tipo di connettività hai sulla tua configurazione. Alcuni di questi renderanno la comunicazione tra le carte più veloce (es. NVLink), altri più lenta (es. PHB). A seconda del tipo di soluzione di scalabilità utilizzata, la velocità di connettività potrebbe avere un impatto maggiore o minore. Se le GPU devono sincronizzarsi raramente, come in DDP, l'impatto di una connessione più lenta sarà meno significativo. Se le GPU devono scambiarsi messaggi spesso, come in ZeRO-DP, una connettività più veloce diventa estremamente importante per ottenere un addestramento più veloce. #### NVlink [NVLink](https://en.wikipedia.org/wiki/NVLink) è un collegamento di comunicazione a corto raggio multilinea seriale basato su cavo sviluppato da Nvidia. Ogni nuova generazione fornisce una larghezza di banda più veloce, ad es. ecco una citazione da [Nvidia Ampere GA102 GPU Architecture](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf): > Third-Generation NVLink® > GA102 GPUs utilize NVIDIA’s third-generation NVLink interface, which includes four x4 links, > with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four > links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth > between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink. > (Note that 3-Way and 4-Way SLI configurations are not supported.) Quindi più `X` si ottiene nel rapporto di `NVX` nell'output di `nvidia-smi topo -m`, meglio è. La generazione dipenderà dall'architettura della tua GPU. Confrontiamo l'esecuzione di un training del modello di linguaggio openai-community/gpt2 su un piccolo campione di wikitext I risultati sono: | NVlink | Time | | ----- | ---: | | Y | 101s | | N | 131s | Puoi vedere che NVLink completa l'addestramento circa il 23% più velocemente. Nel secondo benchmark utilizziamo `NCCL_P2P_DISABLE=1` per dire alle GPU di non utilizzare NVLink. 
Ecco il codice benchmark completo e gli output: ```bash # DDP w/ NVLink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \ --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} # DDP w/o NVLink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \ --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69} ``` Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`) Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`
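Se vuoi verificare la connettività anche da PyTorch, questo è solo uno schizzo minimale a scopo illustrativo: `torch.cuda.can_device_access_peer` indica se due GPU possono comunicare direttamente in peer-to-peer (tipicamente tramite NVLink o PCIe).

```python
import torch

# Controlla l'accesso peer-to-peer tra tutte le coppie di GPU visibili
num_gpus = torch.cuda.device_count()
for i in range(num_gpus):
    for j in range(num_gpus):
        if i == j:
            continue
        p2p = torch.cuda.can_device_access_peer(i, j)
        print(f"GPU {i} -> GPU {j}: peer access {'sì' if p2p else 'no'}")
```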
transformers/docs/source/it/perf_hardware.md/0
{ "file_path": "transformers/docs/source/it/perf_hardware.md", "repo_id": "transformers", "token_count": 3024 }
266
- sections: - local: index title: 🤗 Transformers - local: quicktour title: クイックツアー - local: installation title: インストール title: Get started - sections: - local: pipeline_tutorial title: パイプラインを使用して推論を実行する - local: autoclass_tutorial title: AutoClass を使用して移植可能なコードを作成する - local: preprocessing title: データの前処理 - local: training title: 事前トレーニングされたモデルを微調整する - local: run_scripts title: スクリプトを使用してトレーニングする - local: accelerate title: 🤗 Accelerate を使用して分散トレーニングをセットアップする - local: peft title: 🤗 PEFT を使用してアダプターをロードしてトレーニングする - local: model_sharing title: モデルを共有する - local: transformers_agents title: エージェント - local: llm_tutorial title: LLM を使用した生成 title: Tutorials - sections: - isExpanded: false sections: - local: tasks/sequence_classification title: テキストの分類 - local: tasks/token_classification title: トークンの分類 - local: tasks/question_answering title: 質疑応答 - local: tasks/language_modeling title: 因果言語モデリング - local: tasks/masked_language_modeling title: マスクされた言語モデリング - local: tasks/translation title: 翻訳 - local: tasks/summarization title: 要約 - local: tasks/multiple_choice title: 複数の選択肢 title: 自然言語処理 - isExpanded: false sections: - local: tasks/audio_classification title: 音声の分類 - local: tasks/asr title: 自動音声認識 title: オーディオ - isExpanded: false sections: - local: tasks/image_classification title: 画像分類 - local: tasks/semantic_segmentation title: セマンティックセグメンテーション - local: tasks/video_classification title: ビデオの分類 - local: tasks/object_detection title: 物体検出 - local: tasks/zero_shot_object_detection title: ゼロショット物体検出 - local: tasks/zero_shot_image_classification title: ゼロショット画像分類 - local: tasks/monocular_depth_estimation title: 深さの推定 - local: tasks/image_to_image title: 画像から画像へ - local: tasks/knowledge_distillation_for_image_classification title: コンピュータビジョンのための知識の蒸留 title: コンピュータビジョン - isExpanded: false sections: - local: tasks/image_captioning title: 画像のキャプション - local: tasks/document_question_answering title: 文書の質問への回答 - local: tasks/visual_question_answering title: 視覚的な質問への回答 - local: tasks/text-to-speech title: テキスト読み上げ title: マルチモーダル - isExpanded: false sections: - local: generation_strategies title: 生成戦略をカスタマイズする title: 世代 - isExpanded: false sections: - local: tasks/idefics title: IDEFICS を使用したイメージ タスク - local: tasks/prompting title: LLM プロンプト ガイド title: プロンプト title: Task Guides - sections: - local: fast_tokenizers title: 🤗 トークナイザーの高速トークナイザーを使用する - local: multilingual title: 多言語モデルで推論を実行する - local: create_a_model title: モデル固有の API を使用する - local: custom_models title: カスタムモデルを共有する - local: chat_templating title: チャットモデルのテンプレート - local: serialization title: ONNX へのエクスポート - local: tflite title: TFLite へのエクスポート - local: torchscript title: トーチスクリプトへのエクスポート - local: benchmarks title: ベンチマーク - local: community title: コミュニティリソース - local: custom_tools title: カスタムツールとプロンプト - local: troubleshooting title: トラブルシューティング title: 開発者ガイド - sections: - local: performance title: 概要 - sections: - local: perf_train_gpu_one title: 単一の GPU で効率的にトレーニングするための方法とツール - local: perf_train_gpu_many title: 複数の GPU と並列処理 - local: perf_train_cpu title: CPU での効率的なトレーニング - local: perf_train_cpu_many title: 分散CPUトレーニング - local: perf_train_tpu title: TPU に関するトレーニング - local: perf_train_tpu_tf title: TensorFlow を使用した TPU のトレーニング - local: perf_train_special title: 特殊なハードウェアに関するトレーニング - local: perf_hardware title: トレーニング用のカスタム ハードウェア - local: hpo_train title: Trainer API を使用したハイパーパラメータ検索 title: 効率的なトレーニングテクニック - sections: - local: perf_infer_cpu title: CPUでの推論 - local: perf_infer_gpu_one title: 1 つの GPU での推論 - local: perf_infer_gpu_many title: 多くの GPU での推論 - 
local: perf_infer_special title: 特殊なハードウェアでの推論 title: 推論の最適化 - local: big_models title: 大きなモデルのインスタンス化 - local: tf_xla title: TensorFlowモデルのXLA統合 - local: perf_torch_compile title: torch.compile()を使用した推論の最適化 title: パフォーマンスとスケーラビリティ - sections: - local: add_new_model title: 🤗 Transformersにモデルを追加する方法 - local: add_tensorflow_model title: 🤗 TransformersモデルをTensorFlowに変換する方法 - local: testing title: テスト - local: pr_checks title: プルリクエストのチェック title: 貢献する - sections: - local: philosophy title: フィロソフィー - local: glossary title: 用語集 - local: task_summary title: 🤗 Transformersの機能 - local: tasks_explained title: 🤗 Transformersがタスクを解決する方法 - local: model_summary title: Transformerモデルファミリー - local: tokenizer_summary title: トークナイザーの概要 - local: attention title: 注意機構 - local: pad_truncation title: パディングと切り詰め - local: bertology title: BERTology - local: perplexity title: 固定長モデルのパープレキシティ - local: pipeline_webserver title: Webサーバー推論用パイプライン - local: model_memory_anatomy title: モデルトレーニングの解剖学 title: コンセプチュアルガイド - sections: - sections: - local: main_classes/agent title: エージェントとツール - local: model_doc/auto title: Auto Classes - local: main_classes/callback title: コールバック - local: main_classes/configuration title: 構成 - local: main_classes/data_collator title: データ照合者 - local: main_classes/keras_callbacks title: Keras コールバック - local: main_classes/logging title: ロギング - local: main_classes/model title: モデル - local: main_classes/text_generation title: テキストの生成 - local: main_classes/onnx title: ONNX - local: main_classes/optimizer_schedules title: 最適化 - local: main_classes/output title: モデルの出力 - local: main_classes/pipelines title: パイプライン - local: main_classes/processors title: プロセッサー - local: main_classes/quantization title: 量子化 - local: main_classes/tokenizer title: トークナイザー - local: main_classes/trainer title: トレーナー - local: main_classes/deepspeed title: ディープスピードの統合 - local: main_classes/feature_extractor title: 特徴抽出器 - local: main_classes/image_processor title: 画像処理プロセッサ title: 主要なクラス - sections: - isExpanded: false sections: - local: model_doc/albert title: ALBERT - local: model_doc/bart title: BART - local: model_doc/barthez title: BARThez - local: model_doc/bartpho title: BARTpho - local: model_doc/bert title: BERT - local: model_doc/bert-generation title: BertGeneration - local: model_doc/bert-japanese title: BertJapanese - local: model_doc/bertweet title: Bertweet - local: model_doc/big_bird title: BigBird - local: model_doc/bigbird_pegasus title: BigBirdPegasus - local: model_doc/biogpt title: BioGpt - local: model_doc/blenderbot title: Blenderbot - local: model_doc/blenderbot-small title: Blenderbot Small - local: model_doc/bloom title: BLOOM - local: model_doc/bort title: BORT - local: model_doc/byt5 title: ByT5 - local: model_doc/camembert title: CamemBERT - local: model_doc/canine title: CANINE - local: model_doc/codegen title: CodeGen - local: model_doc/code_llama title: CodeLlama - local: model_doc/convbert title: ConvBERT - local: model_doc/cpm title: CPM - local: model_doc/cpmant title: CPMANT - local: model_doc/ctrl title: CTRL - local: model_doc/deberta title: DeBERTa - local: model_doc/deberta-v2 title: DeBERTa-v2 - local: model_doc/dialogpt title: DialoGPT title: 文章モデル - isExpanded: false sections: - local: model_doc/beit title: BEiT - local: model_doc/bit title: BiT - local: model_doc/conditional_detr title: Conditional DETR - local: model_doc/convnext title: ConvNeXT - local: model_doc/convnextv2 title: ConvNeXTV2 - local: model_doc/cvt title: CvT - local: model_doc/deformable_detr title: Deformable 
DETR - local: model_doc/deit title: DeiT - local: model_doc/deta title: DETA - local: model_doc/detr title: DETR - local: model_doc/dinat title: DiNAT title: ビジョンモデル - isExpanded: false sections: - local: model_doc/audio-spectrogram-transformer title: Audio Spectrogram Transformer - local: model_doc/bark title: Bark - local: model_doc/clap title: CLAP title: 音声モデル - isExpanded: false sections: - local: model_doc/align title: ALIGN - local: model_doc/altclip title: AltCLIP - local: model_doc/blip title: BLIP - local: model_doc/blip-2 title: BLIP-2 - local: model_doc/bridgetower title: BridgeTower - local: model_doc/bros title: BROS - local: model_doc/chinese_clip title: Chinese-CLIP - local: model_doc/clip title: CLIP - local: model_doc/clipseg title: CLIPSeg - local: model_doc/clvp title: CLVP - local: model_doc/data2vec title: Data2Vec - local: model_doc/deplot title: DePlot title: マルチモーダルモデル - isExpanded: false sections: - local: model_doc/decision_transformer title: Decision Transformer title: 強化学習モデル - isExpanded: false sections: - local: model_doc/autoformer title: Autoformer title: 時系列モデル title: モデル - sections: - local: internal/modeling_utils title: カスタムレイヤーとユーティリティ - local: internal/pipelines_utils title: パイプライン用のユーティリティ - local: internal/tokenization_utils title: トークナイザー用のユーティリティ - local: internal/trainer_utils title: トレーナー用ユーティリティ - local: internal/generation_utils title: 生成用ユーティリティ - local: internal/image_processing_utils title: 画像プロセッサ用ユーティリティ - local: internal/audio_utils title: オーディオ処理用のユーティリティ - local: internal/file_utils title: 一般ユーティリティ - local: internal/time_series_utils title: 時系列用のユーティリティ title: 内部ヘルパー title: API
transformers/docs/source/ja/_toctree.yml/0
{ "file_path": "transformers/docs/source/ja/_toctree.yml", "repo_id": "transformers", "token_count": 5823 }
267
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Glossary この用語集は、一般的な機械学習と 🤗 トランスフォーマーの用語を定義し、ドキュメンテーションをより理解するのに役立ちます。 ## A ### attention mask アテンション マスクは、シーケンスをバッチ処理する際に使用されるオプションの引数です。 <Youtube id="M6adb1j2jPI"/> この引数は、モデルにどのトークンを注視すべきか、どのトークンを注視しないかを示します。 例えば、次の2つのシーケンスを考えてみてください: ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence_a = "This is a short sequence." >>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A." >>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"] >>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"] ``` The encoded versions have different lengths: ```python >>> len(encoded_sequence_a), len(encoded_sequence_b) (8, 19) ``` したがって、これらのシーケンスをそのまま同じテンソルに配置することはできません。最初のシーケンスは、 2番目のシーケンスの長さに合わせてパディングする必要があります。または、2番目のシーケンスは、最初のシーケンスの 長さに切り詰める必要があります。 最初の場合、IDのリストはパディングインデックスで拡張されます。トークナイザにリストを渡し、次のようにパディングするように 依頼できます: ```python >>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True) ``` 0sが追加されて、最初の文が2番目の文と同じ長さになるのがわかります: ```python >>> padded_sequences["input_ids"] [[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]] ``` これは、PyTorchまたはTensorFlowでテンソルに変換できます。注意マスクは、モデルがそれらに注意を払わないように、埋め込まれたインデックスの位置を示すバイナリテンソルです。[`BertTokenizer`]では、`1`は注意を払う必要がある値を示し、`0`は埋め込まれた値を示します。この注意マスクは、トークナイザが返す辞書のキー「attention_mask」の下にあります。 ```python >>> padded_sequences["attention_mask"] [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ``` ### autoencoding models [エンコーダーモデル](#encoder-models) および [マスク言語モデリング](#masked-language-modeling-mlm) を参照してください。 ### autoregressive models [因果言語モデリング](#causal-language-modeling) および [デコーダーモデル](#decoder-models) を参照してください。 ## B ### backbone バックボーンは、生の隠れた状態や特徴を出力するネットワーク(埋め込みと層)です。通常、特徴を入力として受け取るために [ヘッド](#head) に接続されており、予測を行います。たとえば、[`ViTModel`] は特定のヘッドが上にないバックボーンです。他のモデルも [`VitModel`] をバックボーンとして使用できます、例えば [DPT](model_doc/dpt) です。 ## C ### causal language modeling モデルがテキストを順番に読み、次の単語を予測する事前トレーニングタスクです。通常、モデルは文全体を読み取りますが、特定のタイムステップで未来のトークンを隠すためにモデル内でマスクを使用します。 ### channel カラー画像は、赤、緑、青(RGB)の3つのチャネルの値の組み合わせから成り立っており、グレースケール画像は1つのチャネルしか持ちません。🤗 Transformers では、チャネルは画像のテンソルの最初または最後の次元になることがあります:[`n_channels`, `height`, `width`] または [`height`, `width`, `n_channels`]。 ### connectionist temporal classification (CTC) 入力と出力が正確にどのように整列するかを正確に知らなくてもモデルを学習させるアルゴリズム。CTC は、特定の入力に対してすべての可能な出力の分布を計算し、その中から最も可能性の高い出力を選択します。CTC は、スピーカーの異なる発話速度など、さまざまな理由で音声がトランスクリプトと完全に整合しない場合に、音声認識タスクで一般的に使用されます。 ### convolution 
ニューラルネットワークの一種で、入力行列が要素ごとに小さな行列(カーネルまたはフィルター)と乗算され、値が新しい行列に合計されるレイヤーのタイプ。これは入力行列全体に対して繰り返される畳み込み操作として知られ、各操作は入力行列の異なるセグメントに適用されます。畳み込みニューラルネットワーク(CNN)は、コンピュータビジョンで一般的に使用されています。 ## D ### decoder input IDs この入力はエンコーダーデコーダーモデルに特有であり、デコーダーに供給される入力IDを含みます。これらの入力は、翻訳や要約などのシーケンスツーシーケンスタスクに使用され、通常、各モデルに固有の方法で構築されます。 ほとんどのエンコーダーデコーダーモデル(BART、T5)は、`labels` から独自に `decoder_input_ids` を作成します。このようなモデルでは、`labels` を渡すことがトレーニングを処理する優れた方法です。 シーケンスツーシーケンストレーニングにおけるこれらの入力IDの処理方法を確認するために、各モデルのドキュメントを確認してください。 ### decoder models オートリグレッションモデルとも呼ばれ、モデルがテキストを順番に読み、次の単語を予測する事前トレーニングタスク(因果言語モデリング)に関与します。通常、モデルは文全体を読み取り、特定のタイムステップで未来のトークンを隠すマスクを使用して行われます。 <Youtube id="d_ixlCubqQw"/> ### deep learning (DL) ニューラルネットワークを使用する機械学習アルゴリズムで、複数の層を持っています。 ## E ### encoder models オートエンコーディングモデルとしても知られており、エンコーダーモデルは入力(テキストや画像など)を、埋め込みと呼ばれる簡略化された数値表現に変換します。エンコーダーモデルは、しばしば[マスクされた言語モデリング(#masked-language-modeling-mlm)](#masked-language-modeling-mlm)などの技術を使用して事前にトレーニングされ、入力シーケンスの一部をマスクし、モデルにより意味のある表現を作成することが強制されます。 <Youtube id="H39Z_720T5s"/> ## F ### feature extraction 生データをより情報豊かで機械学習アルゴリズムにとって有用な特徴のセットに選択および変換するプロセス。特徴抽出の例には、生のテキストを単語埋め込みに変換したり、画像/ビデオデータからエッジや形状などの重要な特徴を抽出したりすることが含まれます。 ### feed forward chunking トランスフォーマー内の各残差注意ブロックでは、通常、自己注意層の後に2つのフィードフォワード層が続きます。 フィードフォワード層の中間埋め込みサイズは、モデルの隠れたサイズよりも大きいことがよくあります(たとえば、`google-bert/bert-base-uncased`の場合)。 入力サイズが `[batch_size、sequence_length]` の場合、中間フィードフォワード埋め込み `[batch_size、sequence_length、config.intermediate_size]` を保存するために必要なメモリは、メモリの大部分を占めることがあります。[Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451)の著者は、計算が `sequence_length` 次元に依存しないため、両方のフィードフォワード層の出力埋め込み `[batch_size、config.hidden_size]_0、...、[batch_size、config.hidden_size]_n` を個別に計算し、後で `[batch_size、sequence_length、config.hidden_size]` に連結することは数学的に等価であると気付きました。これにより、増加した計算時間とメモリ使用量のトレードオフが生じますが、数学的に等価な結果が得られます。 [`apply_chunking_to_forward`] 関数を使用するモデルの場合、`chunk_size` は並列に計算される出力埋め込みの数を定義し、メモリと時間の複雑さのトレードオフを定義します。`chunk_size` が 0 に設定されている場合、フィードフォワードのチャンキングは行われません。 ### finetuned models ファインチューニングは、事前にトレーニングされたモデルを取り、その重みを固定し、新しく追加された[model head](#head)で出力レイヤーを置き換える形式の転移学習です。モデルヘッドは対象のデータセットでトレーニングされます。 詳細については、[Fine-tune a pretrained model](https://huggingface.co/docs/transformers/training) チュートリアルを参照して、🤗 Transformersを使用したモデルのファインチューニング方法を学びましょう。 ## H ### head モデルヘッドは、ニューラルネットワークの最後のレイヤーを指し、生の隠れた状態を受け入れて異なる次元に射影します。各タスクに対して異なるモデルヘッドがあります。例えば: * [`GPT2ForSequenceClassification`] は、ベースの[`GPT2Model`]の上にあるシーケンス分類ヘッド(線形層)です。 * [`ViTForImageClassification`] は、ベースの[`ViTModel`]の`CLS`トークンの最終隠れた状態の上にある画像分類ヘッド(線形層)です。 * [`Wav2Vec2ForCTC`] は、[CTC](#connectionist-temporal-classification-ctc)を持つベースの[`Wav2Vec2Model`]の言語モデリングヘッドです。 ## I ### image patch ビジョンベースのトランスフォーマーモデルは、画像をより小さなパッチに分割し、それらを線形に埋め込み、モデルにシーケンスとして渡します。モデルの ### inference 推論は、トレーニングが完了した後に新しいデータでモデルを評価するプロセスです。 🤗 Transformers を使用して推論を実行する方法については、[推論のパイプライン](https://huggingface.co/docs/transformers/pipeline_tutorial) チュートリアルを参照してください。 ### input IDs 入力IDは、モデルへの入力として渡す必要があるパラメーターの中で最も一般的なものです。これらはトークンのインデックスであり、モデルによって入力として使用されるシーケンスを構築するトークンの数値表現です。 <Youtube id="VFp38yj8h3A"/> 各トークナイザーは異なる方法で動作しますが、基本的なメカニズムは同じです。以下はBERTトークナイザーを使用した例です。BERTトークナイザーは[WordPiece](https://arxiv.org/pdf/1609.08144.pdf)トークナイザーです。 ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence = "A Titan RTX has 24GB of VRAM" ``` トークナイザーは、シーケンスをトークナイザー語彙で使用可能なトークンに分割します。 ```python >>> tokenized_sequence = tokenizer.tokenize(sequence) ``` トークンは単語またはサブワードです。 たとえば、ここでは "VRAM" はモデルの語彙に含まれていなかったため、"V"、"RA"、"M" 
に分割されました。 これらのトークンが別々の単語ではなく、同じ単語の一部であることを示すために、"RA" と "M" にはダブルハッシュのプレフィックスが追加されます。 ```python >>> print(tokenized_sequence) ['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M'] ``` これらのトークンは、モデルが理解できるようにIDに変換できます。これは、文をトークナイザーに直接供給して行うことができます。トークナイザーは、パフォーマンスの向上のために[🤗 Tokenizers](https://github.com/huggingface/tokenizers)のRust実装を活用しています。 ```python >>> inputs = tokenizer(sequence) ``` トークナイザーは、対応するモデルが正しく動作するために必要なすべての引数を含む辞書を返します。トークンのインデックスは、キー `input_ids` の下にあります。 ```python >>> encoded_sequence = inputs["input_ids"] >>> print(encoded_sequence) [101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102] ``` 注意:トークナイザは、関連するモデルがそれらを必要とする場合に自動的に「特別なトークン」を追加します。これらは、モデルが時折使用する特別なIDです。 前のIDシーケンスをデコードする場合、 ```python >>> decoded_sequence = tokenizer.decode(encoded_sequence) ``` 私たちは見ます ```python >>> print(decoded_sequence) [CLS] A Titan RTX has 24GB of VRAM [SEP] ``` これは[`BertModel`]がその入力を期待する方法です。 ## L ### Labels ラベルは、モデルが損失を計算するために渡すことができるオプションの引数です。これらのラベルは、モデルの予測の期待値であるべきです。モデルは、通常の損失を使用して、その予測と期待値(ラベル)との間の損失を計算します。 これらのラベルはモデルのヘッドに応じて異なります。たとえば: - シーケンス分類モデル([`BertForSequenceClassification`])の場合、モデルは次元が `(batch_size)` のテンソルを期待し、バッチ内の各値がシーケンス全体の予測ラベルに対応します。 - トークン分類モデル([`BertForTokenClassification`])の場合、モデルは次元が `(batch_size, seq_length)` のテンソルを期待し、各値が各個々のトークンの予測ラベルに対応します。 - マスク言語モデリングの場合([`BertForMaskedLM`])、モデルは次元が `(batch_size, seq_length)` のテンソルを期待し、各値が各個々のトークンの予測ラベルに対応します。ここでのラベルはマスクされたトークンのトークンIDであり、他のトークンには通常 -100 などの値が設定されます。 - シーケンス間のタスクの場合([`BartForConditionalGeneration`]、[`MBartForConditionalGeneration`])、モデルは次元が `(batch_size, tgt_seq_length)` のテンソルを期待し、各値が各入力シーケンスに関連付けられたターゲットシーケンスに対応します。トレーニング中、BARTとT5の両方は適切な `decoder_input_ids` とデコーダーのアテンションマスクを内部で生成します。通常、これらを提供する必要はありません。これはエンコーダーデコーダーフレームワークを利用するモデルには適用されません。 - 画像分類モデルの場合([`ViTForImageClassification`])、モデルは次元が `(batch_size)` のテンソルを期待し、バッチ内の各値が各個々の画像の予測ラベルに対応します。 - セマンティックセグメンテーションモデルの場合([`SegformerForSemanticSegmentation`])、モデルは次元が `(batch_size, height, width)` のテンソルを期待し、バッチ内の各値が各個々のピクセルの予測ラベルに対応します。 - 物体検出モデルの場合([`DetrForObjectDetection`])、モデルは各個々の画像の予測ラベルと境界ボックスの数に対応する `class_labels` と `boxes` キーを持つ辞書のリストを期待します。 - 自動音声認識モデルの場合([`Wav2Vec2ForCTC`])、モデルは次元が `(batch_size, target_length)` のテンソルを期待し、各値が各個々のトークンの予測ラベルに対応します。 <Tip> 各モデルのラベルは異なる場合があるため、常に各モデルのドキュメントを確認して、それらの特定のラベルに関する詳細情報を確認してください! 
</Tip> ベースモデル([`BertModel`])はラベルを受け入れません。これらはベースのトランスフォーマーモデルであり、単に特徴を出力します。 ### large language models (LLM) 大量のデータでトレーニングされた変換器言語モデル(GPT-3、BLOOM、OPT)を指す一般的な用語です。これらのモデルは通常、多くの学習可能なパラメータを持っています(たとえば、GPT-3の場合、1750億個)。 ## M ### masked language modeling (MLM) モデルはテキストの破損バージョンを見る事前トレーニングタスクで、通常はランダムに一部のトークンをマスキングして元のテキストを予測する必要があります。 ### multimodal テキストと別の種類の入力(たとえば画像)を組み合わせるタスクです。 ## N ### Natural language generation (NLG) テキストを生成する関連するすべてのタスク(たとえば、[Transformersで書く](https://transformer.huggingface.co/)、翻訳など)。 ### Natural language processing (NLP) テキストを扱う方法を一般的に表現したものです。 ### Natural language understanding (NLU) テキスト内に何があるかを理解する関連するすべてのタスク(たとえば、テキスト全体の分類、個々の単語の分類など)。 ## P ### pipeline 🤗 Transformersのパイプラインは、データの前処理と変換を特定の順序で実行してデータを処理し、モデルから予測を返す一連のステップを指す抽象化です。パイプラインに見られるいくつかのステージの例には、データの前処理、特徴抽出、正規化などがあります。 詳細については、[推論のためのパイプライン](https://huggingface.co/docs/transformers/pipeline_tutorial)を参照してください。 ### pixel values モデルに渡される画像の数値表現のテンソルです。ピクセル値は、形状が [`バッチサイズ`, `チャネル数`, `高さ`, `幅`] の行列で、画像プロセッサから生成されます。 ### pooling 行列を小さな行列に縮小する操作で、プール対象の次元の最大値または平均値を取ることが一般的です。プーリングレイヤーは一般的に畳み込みレイヤーの間に見られ、特徴表現をダウンサンプリングします。 ### position IDs トークンごとの位置が埋め込まれているRNNとは異なり、トランスフォーマーは各トークンの位置を把握していません。したがって、モデルはトークンの位置を識別するために位置ID(`position_ids`)を使用します。 これはオプションのパラメータです。モデルに `position_ids` が渡されない場合、IDは自動的に絶対的な位置埋め込みとして作成されます。 絶対的な位置埋め込みは範囲 `[0、config.max_position_embeddings - 1]` から選択されます。一部のモデルは、正弦波位置埋め込みや相対位置埋め込みなど、他のタイプの位置埋め込みを使用することがあります。 ### preprocessing 生データを機械学習モデルで簡単に処理できる形式に準備するタスクです。例えば、テキストは通常、トークン化によって前処理されます。他の入力タイプに対する前処理の具体的な方法を知りたい場合は、[Preprocess](https://huggingface.co/docs/transformers/preprocessing) チュートリアルをご覧ください。 ### pretrained model あるデータ(たとえば、Wikipedia全体など)で事前に学習されたモデルです。事前学習の方法には、自己教師ありの目的が含まれ、テキストを読み取り、次の単語を予測しようとするもの([因果言語モデリング](#causal-language-modeling)を参照)や、一部の単語をマスクし、それらを予測しようとするもの([マスク言語モデリング](#masked-language-modeling-mlm)を参照)があります。 音声とビジョンモデルには独自の事前学習の目的があります。たとえば、Wav2Vec2は音声モデルで、モデルに対して「真の」音声表現を偽の音声表現のセットから識別する必要がある対比的なタスクで事前学習されています。一方、BEiTはビジョンモデルで、一部の画像パッチをマスクし、モデルにマスクされたパッチを予測させるタスク(マスク言語モデリングの目的と似ています)で事前学習されています。 ## R ### recurrent neural network (RNN) テキストを処理するために層をループさせるモデルの一種です。 ### representation learning 生データの意味のある表現を学習する機械学習のサブフィールドです。表現学習の技術の一部には単語埋め込み、オートエンコーダー、Generative Adversarial Networks(GANs)などがあります。 ## S ### sampling rate 秒ごとに取られるサンプル(オーディオ信号など)の数をヘルツ単位で測定したものです。サンプリングレートは音声などの連続信号を離散化する結果です。 ### self-attention 入力の各要素は、どの他の要素に注意を払うべきかを検出します。 ### self-supervised learning モデルがラベルのないデータから自分自身の学習目標を作成する機械学習技術のカテゴリです。これは[教師なし学習](#unsupervised-learning)や[教師あり学習](#supervised-learning)とは異なり、学習プロセスはユーザーからは明示的には監督されていない点が異なります。 自己教師あり学習の1つの例は[マスク言語モデリング](#masked-language-modeling-mlm)で、モデルには一部のトークンが削除された文が与えられ、欠落したトークンを予測するように学習します。 ### semi-supervised learning ラベル付きデータの少量とラベルのないデータの大量を組み合わせてモデルの精度を向上させる広範な機械学習トレーニング技術のカテゴリです。[教師あり学習](#supervised-learning)や[教師なし学習](#unsupervised-learning)とは異なり、半教師あり学習のアプローチの1つは「セルフトレーニング」であり、モデルはラベル付きデータでトレーニングされ、次にラベルのないデータで予測を行います。モデルが最も自信を持って予測する部分がラベル付きデータセットに追加され、モデルの再トレーニングに使用されます。 ### sequence-to-sequence (seq2seq) 入力から新しいシーケンスを生成するモデルです。翻訳モデルや要約モデル([Bart](model_doc/bart)や[T5](model_doc/t5)など)などがこれに該当します。 ### stride [畳み込み](#convolution)または[プーリング](#pooling)において、ストライドはカーネルが行列上で移動する距離を指します。ストライドが1の場合、カーネルは1ピクセルずつ移動し、ストライドが2の場合、カーネルは2ピクセルずつ移動します。 ### supervised learning モデルのトレーニング方法の一つで、直接ラベル付きデータを使用してモデルの性能を修正し指導します。データがトレーニングされているモデルに供給され、その予測が既知のラベルと比較されます。モデルは予測がどれだけ誤っていたかに基づいて重みを更新し、プロセスはモデルの性能を最適化するために繰り返されます。 ## T ### token 文の一部であり、通常は単語ですが、サブワード(一般的でない単語はしばしばサブワードに分割されることがあります)または句読点の記号であることもあります。 ### token Type IDs 
一部のモデルは、文のペアの分類や質問応答を行うことを目的としています。

<Youtube id="0u3ioSwev3s"/>

これには異なる2つのシーケンスを単一の「input_ids」エントリに結合する必要があり、通常は分類子(`[CLS]`)や区切り記号(`[SEP]`)などの特別なトークンの助けを借りて実行されます。例えば、BERTモデルは次のように2つのシーケンス入力を構築します:

```python
>>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP]
```

前述のように、2つのシーケンスを(以前のようにリストとしてではなく)2つの引数として `tokenizer` に渡すことで、このような文を自動的に生成することができます。以下のように:

```python
>>> from transformers import BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
>>> sequence_a = "HuggingFace is based in NYC"
>>> sequence_b = "Where is HuggingFace based?"

>>> encoded_dict = tokenizer(sequence_a, sequence_b)
>>> decoded = tokenizer.decode(encoded_dict["input_ids"])
```

これをデコードすると、次のような結果が得られます:

```python
>>> print(decoded)
[CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP]
```

一部のモデルでは、1つのシーケンスがどこで終わり、別のシーケンスがどこで始まるかを理解するのに十分な情報が備わっています。ただし、BERTなどの他のモデルでは、トークンタイプID(セグメントIDとも呼ばれる)も使用されています。これは、モデル内の2つのシーケンスを識別するバイナリマスクとして表されます。

トークナイザは、このマスクを「token_type_ids」として返します。

```python
>>> encoded_dict["token_type_ids"]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]
```

最初のシーケンス、つまり質問のために使用される「コンテキスト」は、すべてのトークンが「0」で表されています。一方、2番目のシーケンス、質問に対応するものは、すべてのトークンが「1」で表されています。

一部のモデル、例えば [`XLNetModel`] のように、追加のトークンが「2」で表されます。

### transfer learning

事前に学習されたモデルを取り、それをタスク固有のデータセットに適応させる技術。ゼロからモデルを訓練する代わりに、既存のモデルから得た知識を出発点として活用できます。これにより学習プロセスが加速し、必要な訓練データの量が減少します。

### transformer

自己注意ベースの深層学習モデルアーキテクチャ。

## U

### unsupervised learning

モデルに提供されるデータがラベル付けされていないモデルトレーニングの形態。教師なし学習の技術は、タスクに役立つパターンを見つけるためにデータ分布の統計情報を活用します。
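補足として、この用語集で説明した `labels` を渡すとモデルが損失を計算するという挙動を確認できる、最小限のコード例のスケッチを示します。分類ヘッドは微調整前のためランダムに初期化されており、ラベルの値は例示目的です。

```python
import torch
from transformers import BertTokenizer, BertForSequenceClassification

tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
# 2 クラス分類用のヘッドを載せたモデル(ヘッドは未学習)
model = BertForSequenceClassification.from_pretrained("google-bert/bert-base-cased", num_labels=2)

inputs = tokenizer("HuggingFace is based in NYC", return_tensors="pt")
labels = torch.tensor([1])  # シーケンス分類では (batch_size,) の形状

outputs = model(**inputs, labels=labels)
print(outputs.loss)    # labels を渡すと損失が計算される
print(outputs.logits)  # 予測のロジット
```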
transformers/docs/source/ja/glossary.md/0
{ "file_path": "transformers/docs/source/ja/glossary.md", "repo_id": "transformers", "token_count": 12796 }
268
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Trainer [`Trainer`] クラスは、ほとんどの標準的なユースケースに対して、PyTorch で機能を完全にトレーニングするための API を提供します。これは、[サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples) のほとんどで使用されています。 [`Trainer`] をインスタンス化する前に、トレーニング中にカスタマイズのすべてのポイントにアクセスするために [`TrainingArguments`] を作成します。 この API は、複数の GPU/TPU での分散トレーニング、[NVIDIA Apex](https://github.com/NVIDIA/apex) および PyTorch のネイティブ AMP による混合精度をサポートします。 [`Trainer`] には、上記の機能をサポートする基本的なトレーニング ループが含まれています。カスタム動作を挿入するには、それらをサブクラス化し、次のメソッドをオーバーライドします。 - **get_train_dataloader** -- トレーニング データローダーを作成します。 - **get_eval_dataloader** -- 評価用データローダーを作成します。 - **get_test_dataloader** -- テスト データローダーを作成します。 - **log** -- トレーニングを監視しているさまざまなオブジェクトに関する情報をログに記録します。 - **create_optimizer_and_scheduler** -- オプティマイザと学習率スケジューラが渡されなかった場合にセットアップします。 初期化。 `create_optimizer`メソッドと`create_scheduler`メソッドをサブクラス化またはオーバーライドすることもできることに注意してください。 別々に。 - **create_optimizer** -- init で渡されなかった場合にオプティマイザーをセットアップします。 - **create_scheduler** -- init で渡されなかった場合、学習率スケジューラを設定します。 - **compute_loss** - トレーニング入力のバッチの損失を計算します。 - **training_step** -- トレーニング ステップを実行します。 - **prediction_step** -- 評価/テスト ステップを実行します。 - **evaluate** -- 評価ループを実行し、メトリクスを返します。 - **predict** -- テスト セットの予測 (ラベルが使用可能な場合はメトリクスも含む) を返します。 <Tip warning={true}> [`Trainer`] クラスは 🤗 Transformers モデル用に最適化されており、驚くべき動作をする可能性があります 他の機種で使用する場合。独自のモデルで使用する場合は、次の点を確認してください。 - モデルは常に [`~utils.ModelOutput`] のタプルまたはサブクラスを返します。 - `labels` 引数が指定され、その損失が最初の値として返される場合、モデルは損失を計算できます。 タプルの要素 (モデルがタプルを返す場合) - モデルは複数のラベル引数を受け入れることができます ([`TrainingArguments`] で `label_names` を使用して、その名前を [`Trainer`] に示します) が、それらのいずれにも `"label"` という名前を付ける必要はありません。 </Tip> 以下は、加重損失を使用するように [`Trainer`] をカスタマイズする方法の例です (不均衡なトレーニング セットがある場合に役立ちます)。 ```python from torch import nn from transformers import Trainer class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): labels = inputs.pop("labels") # forward pass outputs = model(**inputs) logits = outputs.get("logits") # compute custom loss (suppose one has 3 labels with different weights) loss_fct = nn.CrossEntropyLoss(weight=torch.tensor([1.0, 2.0, 3.0], device=model.device)) loss = loss_fct(logits.view(-1, self.model.config.num_labels), labels.view(-1)) return (loss, outputs) if return_outputs else loss ``` PyTorch [`Trainer`] のトレーニング ループの動作をカスタマイズするもう 1 つの方法は、トレーニング ループの状態を検査できる [callbacks](コールバック) を使用することです (進行状況レポート、TensorBoard または他の ML プラットフォームでのログ記録など)。決定(早期停止など)。 ## Trainer [[autodoc]] Trainer - all ## Seq2SeqTrainer [[autodoc]] Seq2SeqTrainer - evaluate - predict ## TrainingArguments [[autodoc]] TrainingArguments - all ## Seq2SeqTrainingArguments [[autodoc]] Seq2SeqTrainingArguments - all ## Checkpoints デフォルトでは、[`Trainer`] はすべてのチェックポイントを、 [`TrainingArguments`] を使用しています。これらは、xxx を含む`checkpoint-xxx`という名前のサブフォルダーに保存されます。 それはトレーニングの段階でした。 
チェックポイントからトレーニングを再開するには、次のいずれかを使用して [`Trainer.train`] を呼び出します。 - `resume_from_checkpoint=True` は最新のチェックポイントからトレーニングを再開します - `resume_from_checkpoint=checkpoint_dir` ディレクトリ内の特定のチェックポイントからトレーニングを再開します 合格した。 さらに、`push_to_hub=True` を使用すると、モデル ハブにチェックポイントを簡単に保存できます。デフォルトでは、すべて 中間チェックポイントに保存されたモデルは別のコミットに保存されますが、オプティマイザーの状態は保存されません。適応できます [`TrainingArguments`] の `hub-strategy` 値を次のいずれかにします。 - `"checkpoint"`: 最新のチェックポイントも last-checkpoint という名前のサブフォルダーにプッシュされます。 `trainer.train(resume_from_checkpoint="output_dir/last-checkpoint")` を使用してトレーニングを簡単に再開します。 - `"all_checkpoints"`: すべてのチェックポイントは、出力フォルダーに表示されるようにプッシュされます (したがって、1 つのチェックポイントが得られます) 最終リポジトリ内のフォルダーごとのチェックポイント フォルダー) ## Logging デフォルトでは、[`Trainer`] はメインプロセスに `logging.INFO` を使用し、レプリカがある場合には `logging.WARNING` を使用します。 これらのデフォルトは、[`TrainingArguments`] の 5 つの `logging` レベルのいずれかを使用するようにオーバーライドできます。 引数: - `log_level` - メインプロセス用 - `log_level_replica` - レプリカ用 さらに、[`TrainingArguments`] の `log_on_each_node` が `False` に設定されている場合、メイン ノードのみが メイン プロセスのログ レベル設定を使用すると、他のすべてのノードはレプリカのログ レベル設定を使用します。 [`Trainer`] は、`transformers` のログ レベルをノードごとに個別に設定することに注意してください。 [`Trainer.__init__`]。したがって、他の機能を利用する場合は、これをより早く設定することをお勧めします (次の例を参照)。 [`Trainer`] オブジェクトを作成する前の `transformers` 機能。 これをアプリケーションで使用する方法の例を次に示します。 ```python [...] logger = logging.getLogger(__name__) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) # set the main code and the modules it uses to the same log-level according to the node log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) trainer = Trainer(...) ``` そして、メイン ノードと他のすべてのノードで重複する可能性が高いものを出力しないように警告するだけを表示したい場合は、 警告: 次のように実行できます。 ```bash my_app.py ... --log_level warning --log_level_replica error ``` マルチノード環境で、各ノードのメインプロセスのログを繰り返したくない場合は、次のようにします。 上記を次のように変更します。 ```bash my_app.py ... --log_level warning --log_level_replica error --log_on_each_node 0 ``` その後、最初のノードのメイン プロセスのみが「警告」レベルでログに記録され、メイン ノード上の他のすべてのプロセスはログに記録されます。 ノードと他のノード上のすべてのプロセスは「エラー」レベルでログに記録されます。 アプリケーションをできるだけ静かにする必要がある場合は、次のようにします。 ```bash my_app.py ... --log_level error --log_level_replica error --log_on_each_node 0 ``` (マルチノード環境の場合は `--log_on_each_node 0` を追加します) ## Randomness [`Trainer`] によって生成されたチェックポイントから再開する場合、すべての努力がその状態を復元するために行われます。 _python_、_numpy_、および _pytorch_ の RNG 状態は、そのチェックポイントを保存した時点と同じ状態になります。 これにより、「停止して再開」というスタイルのトレーニングが、ノンストップトレーニングに可能な限り近づけられるはずです。 ただし、さまざまなデフォルトの非決定的な pytorch 設定により、これは完全に機能しない可能性があります。フルをご希望の場合は 決定論については、[ランダム性のソースの制御](https://pytorch.org/docs/stable/notes/randomness) を参照してください。ドキュメントで説明されているように、これらの設定の一部は 物事を決定論的にするもの (例: `torch.backends.cudnn.deterministic`) は物事を遅くする可能性があるため、これは デフォルトでは実行できませんが、必要に応じて自分で有効にすることができます。 ## Specific GPUs Selection どの GPU をどのような順序で使用するかをプログラムに指示する方法について説明します。 [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.Parallel.DistributedDataParallel.html) を使用して GPU のサブセットのみを使用する場合、使用する GPU の数を指定するだけです。 。たとえば、GPU が 4 つあるが、最初の 2 つを使用したい場合は、次のようにします。 ```bash torchrun --nproc_per_node=2 trainer-program.py ... ``` [`accelerate`](https://github.com/huggingface/accelerate) または [`deepspeed`](https://github.com/microsoft/DeepSpeed) がインストールされている場合は、次を使用して同じことを達成することもできます。の一つ: ```bash accelerate launch --num_processes 2 trainer-program.py ... ``` ```bash deepspeed --num_gpus 2 trainer-program.py ... 
``` これらのランチャーを使用するために、Accelerate または [Deepspeed 統合](deepspeed) 機能を使用する必要はありません。 これまでは、プログラムに使用する GPU の数を指示できました。次に、特定の GPU を選択し、その順序を制御する方法について説明します。 次の環境変数は、使用する GPU とその順序を制御するのに役立ちます。 **`CUDA_VISIBLE_DEVICES`** 複数の GPU があり、そのうちの 1 つまたはいくつかの GPU だけを使用したい場合は、環境変数 `CUDA_VISIBLE_DEVICES` を使用する GPU のリストに設定します。 たとえば、4 つの GPU (0、1、2、3) があるとします。物理 GPU 0 と 2 のみで実行するには、次のようにします。 ```bash CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ... ``` したがって、pytorch は 2 つの GPU のみを認識し、物理 GPU 0 と 2 はそれぞれ `cuda:0` と `cuda:1` にマッピングされます。 順序を変更することもできます。 ```bash CUDA_VISIBLE_DEVICES=2,0 torchrun trainer-program.py ... ``` ここでは、物理 GPU 0 と 2 がそれぞれ`cuda:1`と`cuda:0`にマッピングされています。 上記の例はすべて `DistributedDataParallel` 使用パターンのものですが、同じ方法が [`DataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html) でも機能します。 ```bash CUDA_VISIBLE_DEVICES=2,0 python trainer-program.py ... ``` GPU のない環境をエミュレートするには、次のようにこの環境変数を空の値に設定するだけです。 ```bash CUDA_VISIBLE_DEVICES= python trainer-program.py ... ``` 他の環境変数と同様に、これらをコマンド ラインに追加する代わりに、次のようにエクスポートすることもできます。 ```bash export CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ... ``` ただし、この方法では、以前に環境変数を設定したことを忘れて、なぜ間違った GPU が使用されているのか理解できない可能性があるため、混乱を招く可能性があります。したがって、このセクションのほとんどの例で示されているように、同じコマンド ラインで特定の実行に対してのみ環境変数を設定するのが一般的です。 **`CUDA_DEVICE_ORDER`** 物理デバイスの順序を制御する追加の環境変数 `CUDA_DEVICE_ORDER` があります。選択肢は次の 2 つです。 1. PCIe バス ID 順 (`nvidia-smi` の順序と一致) - これがデフォルトです。 ```bash export CUDA_DEVICE_ORDER=PCI_BUS_ID ``` 2. GPU コンピューティング能力順に並べる ```bash export CUDA_DEVICE_ORDER=FASTEST_FIRST ``` ほとんどの場合、この環境変数を気にする必要はありませんが、古い GPU と新しい GPU が物理的に挿入されているため、遅い古いカードが遅くなっているように見えるような偏ったセットアップを行っている場合には、非常に役立ちます。初め。これを解決する 1 つの方法は、カードを交換することです。ただし、カードを交換できない場合 (デバイスの冷却が影響を受けた場合など)、`CUDA_DEVICE_ORDER=FASTEST_FIRST`を設定すると、常に新しい高速カードが最初に配置されます。ただし、`nvidia-smi`は依然として PCIe の順序でレポートするため、多少混乱するでしょう。 順序を入れ替えるもう 1 つの解決策は、以下を使用することです。 ```bash export CUDA_VISIBLE_DEVICES=1,0 ``` この例では 2 つの GPU だけを使用していますが、もちろん、コンピューターに搭載されている数の GPU にも同じことが当てはまります。 また、この環境変数を設定する場合は、`~/.bashrc` ファイルまたはその他の起動設定ファイルに設定して、忘れるのが最善です。 ## Trainer Integrations [`Trainer`] は、トレーニングを劇的に改善する可能性のあるライブラリをサポートするように拡張されました。 時間とはるかに大きなモデルに適合します。 現在、サードパーティのソリューション [DeepSpeed](https://github.com/microsoft/DeepSpeed) および [PyTorch FSDP](https://pytorch.org/docs/stable/fsdp.html) をサポートしています。論文 [ZeRO: メモリの最適化兆パラメータ モデルのトレーニングに向けて、Samyam Rajbhandari、Jeff Rasley、Olatunji Ruwase、Yuxiong He 著](https://arxiv.org/abs/1910.02054)。 この提供されるサポートは、この記事の執筆時点では新しくて実験的なものです。 DeepSpeed と PyTorch FSDP のサポートはアクティブであり、それに関する問題は歓迎しますが、FairScale 統合は PyTorch メインに統合されているため、もうサポートしていません ([PyTorch FSDP 統合](#pytorch-fully-sharded-data-parallel)) <a id='zero-install-notes'></a> ### CUDA Extension Installation Notes この記事の執筆時点では、Deepspeed を使用するには、CUDA C++ コードをコンパイルする必要があります。 すべてのインストールの問題は、[Deepspeed](https://github.com/microsoft/DeepSpeed/issues) の対応する GitHub の問題を通じて対処する必要がありますが、ビルド中に発生する可能性のある一般的な問題がいくつかあります。 CUDA 拡張機能を構築する必要がある PyTorch 拡張機能。 したがって、次の操作を実行中に CUDA 関連のビルドの問題が発生した場合は、次のとおりです。 ```bash pip install deepspeed ``` まず次の注意事項をお読みください。 これらのノートでは、`pytorch` が CUDA `10.2` でビルドされた場合に何をすべきかの例を示します。あなたの状況が次のような場合 異なる場合は、バージョン番号を目的のバージョンに調整することを忘れないでください。 #### Possible problem #1 Pytorch には独自の CUDA ツールキットが付属していますが、これら 2 つのプロジェクトをビルドするには、同一バージョンの CUDA が必要です。 システム全体にインストールされます。 たとえば、Python 環境に `cudatoolkit==10.2` を指定して `pytorch` をインストールした場合は、次のものも必要です。 CUDA `10.2` がシステム全体にインストールされました。 正確な場所はシステムによって異なる場合がありますが、多くのシステムでは`/usr/local/cuda-10.2`が最も一般的な場所です。 Unix システム。 CUDA が正しく設定され、`PATH`環境変数に追加されると、 次のようにしてインストール場所を指定します。 ```bash which nvcc ``` CUDA 
がシステム全体にインストールされていない場合は、最初にインストールしてください。お気に入りを使用して手順を見つけることができます 検索エンジン。たとえば、Ubuntu を使用している場合は、[ubuntu cuda 10.2 install](https://www.google.com/search?q=ubuntu+cuda+10.2+install) を検索するとよいでしょう。 #### Possible problem #2 もう 1 つの考えられる一般的な問題は、システム全体に複数の CUDA ツールキットがインストールされている可能性があることです。たとえばあなた がある可能性があり: ```bash /usr/local/cuda-10.2 /usr/local/cuda-11.0 ``` この状況では、`PATH` および `LD_LIBRARY_PATH` 環境変数に以下が含まれていることを確認する必要があります。 目的の CUDA バージョンへの正しいパス。通常、パッケージ インストーラーは、これらに、 最後のバージョンがインストールされました。適切なパッケージが見つからないためにパッケージのビルドが失敗するという問題が発生した場合は、 CUDA バージョンがシステム全体にインストールされているにもかかわらず、前述の 2 つを調整する必要があることを意味します 環境変数。 まず、その内容を見てみましょう。 ```bash echo $PATH echo $LD_LIBRARY_PATH ``` それで、中に何が入っているかがわかります。 `LD_LIBRARY_PATH` が空である可能性があります。 `PATH` は実行可能ファイルが存在する場所をリストし、`LD_LIBRARY_PATH` は共有ライブラリの場所を示します。 探すことです。どちらの場合も、前のエントリが後のエントリより優先されます。 `:` は複数を区切るために使用されます エントリ。 ここで、ビルド プログラムに特定の CUDA ツールキットの場所を指示するには、最初にリストされる希望のパスを挿入します。 やっていること: ```bash export PATH=/usr/local/cuda-10.2/bin:$PATH export LD_LIBRARY_PATH=/usr/local/cuda-10.2/lib64:$LD_LIBRARY_PATH ``` 既存の値を上書きするのではなく、先頭に追加することに注意してください。 もちろん、必要に応じてバージョン番号やフルパスを調整します。割り当てたディレクトリが実際に機能することを確認してください 存在する。 `lib64` サブディレクトリは、`libcudart.so` などのさまざまな CUDA `.so` オブジェクトが存在する場所です。 システムでは別の名前が付けられますが、現実を反映するように調整してください。 #### Possible problem #3 一部の古い CUDA バージョンは、新しいコンパイラでのビルドを拒否する場合があります。たとえば、あなたは`gcc-9`を持っていますが、それが必要です `gcc-7`。 それにはさまざまな方法があります。 最新の CUDA ツールキットをインストールできる場合は、通常、新しいコンパイラがサポートされているはずです。 あるいは、既に所有しているコンパイラに加えて、下位バージョンのコンパイラをインストールすることもできます。 すでに存在しますが、デフォルトではないため、ビルドシステムはそれを認識できません。 「gcc-7」がインストールされているが、 ビルドシステムが見つからないというメッセージを表示する場合は、次の方法で解決できる可能性があります。 ```bash sudo ln -s /usr/bin/gcc-7 /usr/local/cuda-10.2/bin/gcc sudo ln -s /usr/bin/g++-7 /usr/local/cuda-10.2/bin/g++ ``` ここでは、`/usr/local/cuda-10.2/bin/gcc` から `gcc-7` へのシンボリックリンクを作成しています。 `/usr/local/cuda-10.2/bin/` は `PATH` 環境変数内にある必要があります (前の問題の解決策を参照)。 `gcc-7` (および `g++7`) が見つかるはずで、ビルドは成功します。 いつものように、状況に合わせて例のパスを編集してください。 ### PyTorch Fully Sharded Data parallel より大きなバッチ サイズで巨大なモデルのトレーニングを高速化するには、完全にシャード化されたデータ並列モデルを使用できます。 このタイプのデータ並列パラダイムでは、オプティマイザーの状態、勾配、パラメーターをシャーディングすることで、より多くのデータと大規模なモデルをフィッティングできます。 この機能とその利点の詳細については、[完全シャーディング データ並列ブログ](https://pytorch.org/blog/introducing-pytorch-full-sharded-data-Parallel-api/) をご覧ください。 最新の PyTorch の Fully Sharded Data Parallel (FSDP) トレーニング機能を統合しました。 必要なのは、設定を通じて有効にすることだけです。 **FSDP サポートに必要な PyTorch バージョン**: PyTorch Nightly (リリース後にこれを読んだ場合は 1.12.0) FSDP を有効にしたモデルの保存は、最近の修正でのみ利用できるためです。 **使用法**: - 配布されたランチャーが追加されていることを確認してください まだ使用していない場合は、`-m torch.distributed.launch --nproc_per_node=NUMBER_OF_GPUS_YOU_HAVE`を使用します。 - **シャーディング戦略**: - FULL_SHARD : データ並列ワーカー/GPU にわたるシャード オプティマイザーの状態 + 勾配 + モデル パラメーター。 このためには、コマンドライン引数に`--fsdp full_shard`を追加します。 - SHARD_GRAD_OP : シャード オプティマイザーの状態 + データ並列ワーカー/GPU 全体の勾配。 このためには、コマンドライン引数に`--fsdp shard_grad_op`を追加します。 - NO_SHARD : シャーディングなし。このためには、コマンドライン引数に`--fsdp no_shard`を追加します。 - パラメータと勾配を CPU にオフロードするには、 コマンドライン引数に`--fsdp "full_shard offload"`または`--fsdp "shard_grad_op offload"`を追加します。 - `default_auto_wrap_policy` を使用して FSDP でレイヤーを自動的に再帰的にラップするには、 コマンドライン引数に`--fsdp "full_shard auto_wrap"`または`--fsdp "shard_grad_op auto_wrap"`を追加します。 - CPU オフロードと自動ラッピングの両方を有効にするには、 コマンドライン引数に`--fsdp "full_shard offload auto_wrap"`または`--fsdp "shard_grad_op offload auto_wrap"`を追加します。 - 残りの FSDP 構成は、`--fsdp_config <path_to_fsdp_config.json>`を介して渡されます。それは、次のいずれかの場所です。 FSDP json 構成ファイル (例: `fsdp_config.json`)、またはすでにロードされている json ファイルを `dict` として使用します。 - 自動ラッピングが有効な場合は、トランスベースの自動ラップ ポリシーまたはサイズ ベースの自動ラップ ポリシーを使用できます。 - トランスフォーマーベースの自動ラップポリシーの場合、構成ファイルで 
`fsdp_transformer_layer_cls_to_wrap` を指定することをお勧めします。指定しない場合、使用可能な場合、デフォルト値は `model._no_split_modules` になります。 これは、ラップするトランスフォーマー層クラス名のリスト (大文字と小文字を区別) を指定します (例: [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] ...)。 重みを共有するサブモジュール (埋め込み層など) が異なる FSDP ラップされたユニットにならないようにする必要があるため、これは重要です。 このポリシーを使用すると、マルチヘッド アテンションとそれに続くいくつかの MLP レイヤーを含むブロックごとにラッピングが発生します。 共有埋め込みを含む残りの層は、同じ最も外側の FSDP ユニットにラップされるのが便利です。 したがって、トランスベースのモデルにはこれを使用してください。 - サイズベースの自動ラップポリシーの場合は、設定ファイルに`fsdp_min_num_params`を追加してください。 自動ラッピングのための FSDP のパラメータの最小数を指定します。 - 設定ファイルで `fsdp_backward_prefetch` を指定できるようになりました。次のパラメータのセットをいつプリフェッチするかを制御します。 `backward_pre` と `backward_pos` が利用可能なオプションです。 詳細については、`torch.distributed.fsdp.full_sharded_data_Parallel.BackwardPrefetch`を参照してください。 - 設定ファイルで `fsdp_forward_prefetch` を指定できるようになりました。次のパラメータのセットをいつプリフェッチするかを制御します。 `True`の場合、FSDP はフォワード パスでの実行中に、次に来るオールギャザーを明示的にプリフェッチします。 - 設定ファイルで `limit_all_gathers` を指定できるようになりました。 `True`の場合、FSDP は CPU スレッドを明示的に同期して、実行中のオールギャザが多すぎるのを防ぎます。 - `activation_checkpointing`を設定ファイルで指定できるようになりました。 `True`の場合、FSDP アクティベーション チェックポイントは、FSDP のアクティベーションをクリアすることでメモリ使用量を削減する手法です。 特定のレイヤーを処理し、バックワード パス中にそれらを再計算します。事実上、これは余分な計算時間を犠牲にします メモリ使用量を削減します。 **注意すべき注意点がいくつかあります** - これは `generate` と互換性がないため、 `--predict_with_generate` とも互換性がありません すべての seq2seq/clm スクリプト (翻訳/要約/clm など)。 問題 [#21667](https://github.com/huggingface/transformers/issues/21667) を参照してください。 ### PyTorch/XLA Fully Sharded Data parallel TPU ユーザーの皆様に朗報です。 PyTorch/XLA は FSDP をサポートするようになりました。 最新の Fully Sharded Data Parallel (FSDP) トレーニングがすべてサポートされています。 詳細については、[FSDP を使用した Cloud TPU での PyTorch モデルのスケーリング](https://pytorch.org/blog/scaling-pytorch-models-on-cloud-tpus-with-fsdp/) および [PyTorch/XLA 実装 を参照してください。 FSDP の](https://github.com/pytorch/xla/tree/master/torch_xla/distributed/fsdp) 必要なのは、設定を通じて有効にすることだけです。 **FSDP サポートに必要な PyTorch/XLA バージョン**: >=2.0 **使用法**: `--fsdp "full shard"` を、`--fsdp_config <path_to_fsdp_config.json>` に加えられる次の変更とともに渡します。 - PyTorch/XLA FSDP を有効にするには、`xla`を`True`に設定する必要があります。 - `xla_fsdp_settings` 値は、XLA FSDP ラッピング パラメータを格納する辞書です。 オプションの完全なリストについては、[こちら]( https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_full_sharded_data_Parallel.py)。 - `xla_fsdp_grad_ckpt`。 `True`の場合、ネストされた XLA FSDP でラップされた各レイヤー上で勾配チェックポイントを使用します。 この設定は、xla フラグが true に設定されており、自動ラッピング ポリシーが指定されている場合にのみ使用できます。 `fsdp_min_num_params` または `fsdp_transformer_layer_cls_to_wrap`。 - トランスフォーマー ベースの自動ラップ ポリシーまたはサイズ ベースの自動ラップ ポリシーのいずれかを使用できます。 - トランスフォーマーベースの自動ラップポリシーの場合、構成ファイルで `fsdp_transformer_layer_cls_to_wrap` を指定することをお勧めします。指定しない場合、使用可能な場合、デフォルト値は `model._no_split_modules` になります。 これは、ラップするトランスフォーマー層クラス名のリスト (大文字と小文字を区別) を指定します (例: [`BertLayer`]、[`GPTJBlock`]、[`T5Block`] ...)。 重みを共有するサブモジュール (埋め込み層など) が異なる FSDP ラップされたユニットにならないようにする必要があるため、これは重要です。 このポリシーを使用すると、マルチヘッド アテンションとそれに続くいくつかの MLP レイヤーを含むブロックごとにラッピングが発生します。 共有埋め込みを含む残りの層は、同じ最も外側の FSDP ユニットにラップされるのが便利です。 したがって、トランスベースのモデルにはこれを使用してください。 - サイズベースの自動ラップポリシーの場合は、設定ファイルに`fsdp_min_num_params`を追加してください。 自動ラッピングのための FSDP のパラメータの最小数を指定します。 ### Using Trainer for accelerated PyTorch Training on Mac PyTorch v1.12 リリースにより、開発者と研究者は Apple シリコン GPU を利用してモデル トレーニングを大幅に高速化できます。 これにより、プロトタイピングや微調整などの機械学習ワークフローを Mac 上でローカルで実行できるようになります。 PyTorch のバックエンドとしての Apple の Metal Performance Shaders (MPS) はこれを可能にし、新しい `"mps"` デバイス経由で使用できます。 これにより、計算グラフとプリミティブが MPS Graph フレームワークと MPS によって提供される調整されたカーネルにマッピングされます。 詳細については、公式ドキュメント [Mac での Accelerated PyTorch Training の紹介](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/) を参照してください。 および [MPS 
バックエンド](https://pytorch.org/docs/stable/notes/mps.html)。 <Tip warning={false}> MacOS マシンに PyTorch >= 1.13 (執筆時点ではナイトリー バージョン) をインストールすることを強くお勧めします。 トランスベースのモデルのモデルの正確性とパフォーマンスの向上に関連する主要な修正が行われています。 詳細については、https://github.com/pytorch/pytorch/issues/82707 を参照してください。 </Tip> **Apple Silicon チップを使用したトレーニングと推論の利点** 1. ユーザーがローカルで大規模なネットワークやバッチ サイズをトレーニングできるようにします 2. ユニファイド メモリ アーキテクチャにより、データ取得の遅延が短縮され、GPU がメモリ ストア全体に直接アクセスできるようになります。 したがって、エンドツーエンドのパフォーマンスが向上します。 3. クラウドベースの開発に関連するコストや追加のローカル GPU の必要性を削減します。 **前提条件**: mps サポートを備えたトーチをインストールするには、 この素晴らしいメディア記事 [GPU アクセラレーションが M1 Mac の PyTorch に登場](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1) に従ってください。 。 **使用法**: `mps` デバイスは、`cuda` デバイスが使用される方法と同様に利用可能な場合、デフォルトで使用されます。 したがって、ユーザーによるアクションは必要ありません。 たとえば、以下のコマンドを使用して、Apple Silicon GPU を使用して公式の Glue テキスト分類タスクを (ルート フォルダーから) 実行できます。 ```bash export TASK_NAME=mrpc python examples/pytorch/text-classification/run_glue.py \ --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 32 \ --learning_rate 2e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` **注意すべきいくつかの注意事項** 1. 一部の PyTorch 操作は mps に実装されていないため、エラーがスローされます。 これを回避する 1 つの方法は、環境変数 `PYTORCH_ENABLE_MPS_FALLBACK=1` を設定することです。 これらの操作では CPU にフォールバックします。ただし、それでも UserWarning がスローされます。 2. 分散セットアップ`gloo`および`nccl`は、`mps`デバイスでは動作しません。 これは、現在「mps」デバイス タイプの単一 GPU のみを使用できることを意味します。 最後に、覚えておいてください。 🤗 `Trainer` は MPS バックエンドのみを統合するため、 MPS バックエンドの使用に関して問題や質問がある場合は、 [PyTorch GitHub](https://github.com/pytorch/pytorch/issues) に問題を提出してください。 ## Using Accelerate Launcher with Trainer 加速してトレーナーにパワーを与えましょう。ユーザーが期待することに関しては、次のとおりです。 - トレーナー引数に対して FSDP、DeepSpeed などのトレーナー インテレーションを変更せずに使用し続けることができます。 - トレーナーで Accelerate Launcher を使用できるようになりました (推奨)。 トレーナーで Accelerate Launcher を使用する手順: 1. 🤗 Accelerate がインストールされていることを確認してください。Accelerate がないと `Trainer` を使用することはできません。そうでない場合は、`pip install accelerate`してください。 Accelerate のバージョンを更新する必要がある場合もあります: `pip install activate --upgrade` 2. `accelerate config`を実行し、アンケートに記入します。以下は加速設定の例です。 a. DDP マルチノード マルチ GPU 構成: ```yaml compute_environment: LOCAL_MACHINE distributed_type: MULTI_GPU downcast_bf16: 'no' gpu_ids: all machine_rank: 0 #change rank as per the node main_process_ip: 192.168.20.1 main_process_port: 9898 main_training_function: main mixed_precision: fp16 num_machines: 2 num_processes: 8 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` b. 
FSDP config: ```yaml compute_environment: LOCAL_MACHINE distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_forward_prefetch: true fsdp_offload_params: false fsdp_sharding_strategy: 1 fsdp_state_dict_type: FULL_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` c.ファイルを指す DeepSpeed 構成: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: deepspeed_config_file: /home/user/configs/ds_zero3_config.json zero3_init_flag: true distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` d.加速プラグインを使用した DeepSpeed 構成: ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 0.7 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: true zero_stage: 2 distributed_type: DEEPSPEED downcast_bf16: 'no' machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 4 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` 3. 加速設定またはランチャー引数によって上記で処理された引数以外の引数を使用して、トレーナー スクリプトを実行します。 以下は、上記の FSDP 構成で`accelerate launcher`を使用して`run_glue.py`を実行する例です。 ```bash cd transformers accelerate launch \ ./examples/pytorch/text-classification/run_glue.py \ --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` 4. 
`accelerate launch`するための cmd 引数を直接使用することもできます。上の例は次のようにマッピングされます。 ```bash cd transformers accelerate launch --num_processes=2 \ --use_fsdp \ --mixed_precision=bf16 \ --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \ --fsdp_transformer_layer_cls_to_wrap="BertLayer" \ --fsdp_sharding_strategy=1 \ --fsdp_state_dict_type=FULL_STATE_DICT \ ./examples/pytorch/text-classification/run_glue.py --model_name_or_path google-bert/bert-base-cased \ --task_name $TASK_NAME \ --do_train \ --do_eval \ --max_seq_length 128 \ --per_device_train_batch_size 16 \ --learning_rate 5e-5 \ --num_train_epochs 3 \ --output_dir /tmp/$TASK_NAME/ \ --overwrite_output_dir ``` 詳細については、🤗 Accelerate CLI ガイドを参照してください: [🤗 Accelerate スクリプトの起動](https://huggingface.co/docs/accelerate/basic_tutorials/launch)。 移動されたセクション: [ <a href="./deepspeed#deepspeed-trainer-integration">DeepSpeed</a><a id="deepspeed"></a> | <a href="./deepspeed#deepspeed-installation">Installation</a><a id="installation"></a> | <a href="./deepspeed#deepspeed-multi-gpu">Deployment with multiple GPUs</a><a id="deployment-with-multiple-gpus"></a> | <a href="./deepspeed#deepspeed-one-gpu">Deployment with one GPU</a><a id="deployment-with-one-gpu"></a> | <a href="./deepspeed#deepspeed-notebook">Deployment in Notebooks</a><a id="deployment-in-notebooks"></a> | <a href="./deepspeed#deepspeed-config">Configuration</a><a id="configuration"></a> | <a href="./deepspeed#deepspeed-config-passing">Passing Configuration</a><a id="passing-configuration"></a> | <a href="./deepspeed#deepspeed-config-shared">Shared Configuration</a><a id="shared-configuration"></a> | <a href="./deepspeed#deepspeed-zero">ZeRO</a><a id="zero"></a> | <a href="./deepspeed#deepspeed-zero2-config">ZeRO-2 Config</a><a id="zero-2-config"></a> | <a href="./deepspeed#deepspeed-zero3-config">ZeRO-3 Config</a><a id="zero-3-config"></a> | <a href="./deepspeed#deepspeed-nvme">NVMe Support</a><a id="nvme-support"></a> | <a href="./deepspeed#deepspeed-zero2-zero3-performance">ZeRO-2 vs ZeRO-3 Performance</a><a id="zero-2-vs-zero-3-performance"></a> | <a href="./deepspeed#deepspeed-zero2-example">ZeRO-2 Example</a><a id="zero-2-example"></a> | <a href="./deepspeed#deepspeed-zero3-example">ZeRO-3 Example</a><a id="zero-3-example"></a> | <a href="./deepspeed#deepspeed-optimizer">Optimizer</a><a id="optimizer"></a> | <a href="./deepspeed#deepspeed-scheduler">Scheduler</a><a id="scheduler"></a> | <a href="./deepspeed#deepspeed-fp32">fp32 Precision</a><a id="fp32-precision"></a> | <a href="./deepspeed#deepspeed-amp">Automatic Mixed Precision</a><a id="automatic-mixed-precision"></a> | <a href="./deepspeed#deepspeed-bs">Batch Size</a><a id="batch-size"></a> | <a href="./deepspeed#deepspeed-grad-acc">Gradient Accumulation</a><a id="gradient-accumulation"></a> | <a href="./deepspeed#deepspeed-grad-clip">Gradient Clipping</a><a id="gradient-clipping"></a> | <a href="./deepspeed#deepspeed-weight-extraction">Getting The Model Weights Out</a><a id="getting-the-model-weights-out"></a> ]
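補足として、このページで説明したチェックポイントの保存と `resume_from_checkpoint` による再開を Python コードから行う場合の最小限のスケッチを示します。`model` と `train_dataset` は事前に用意されているものと仮定し、引数の値はすべて例示です。

```python
from transformers import Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="output_dir",          # checkpoint-xxx はこのディレクトリの下に保存される
    per_device_train_batch_size=16,
    num_train_epochs=3,
    save_strategy="epoch",
)
trainer = Trainer(model=model, args=args, train_dataset=train_dataset)

# 最新の checkpoint-xxx から再開する(最初の実行では単に trainer.train() を呼ぶ)
trainer.train(resume_from_checkpoint=True)
```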
transformers/docs/source/ja/main_classes/trainer.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/trainer.md", "repo_id": "transformers", "token_count": 19572 }
269
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BigBird ## Overview BigBird モデルは、[Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) で提案されました。 ザヒール、マンジルとグルガネシュ、グルとダベイ、クマール・アヴィナヴァとエインズリー、ジョシュアとアルベルティ、クリスとオンタノン、 サンティアゴとファム、フィリップとラブラ、アニルードとワン、キーファンとヤン、リーなど。 BigBird は注目度が低い BERT などの Transformer ベースのモデルをさらに長いシーケンスに拡張する、Transformer ベースのモデル。まばらに加えて アテンションと同様に、BigBird は入力シーケンスにランダム アテンションだけでなくグローバル アテンションも適用します。理論的には、 まばらで全体的でランダムな注意を適用すると、完全な注意に近づくことが示されていますが、 長いシーケンスでは計算効率が大幅に向上します。より長いコンテキストを処理できる機能の結果として、 BigBird は、質問応答や BERT または RoBERTa と比較した要約。 論文の要約は次のとおりです。 *BERT などのトランスフォーマーベースのモデルは、NLP で最も成功した深層学習モデルの 1 つです。 残念ながら、それらの中核的な制限の 1 つは、シーケンスに対する二次依存性 (主にメモリに関する) です。 完全な注意メカニズムによる長さです。これを解決するために、BigBird は、まばらな注意メカニズムを提案します。 この二次依存関係を線形に削減します。 BigBird がシーケンス関数の汎用近似器であることを示します。 チューリングは完全であるため、二次完全注意モデルのこれらの特性が保存されます。途中、私たちの 理論分析により、O(1) 個のグローバル トークン (CLS など) を持つ利点の一部が明らかになり、 スパース注意メカニズムの一部としてのシーケンス。提案されたスパース アテンションは、次の長さのシーケンスを処理できます。 同様のハードウェアを使用して以前に可能であったものの 8 倍。より長いコンテキストを処理できる機能の結果として、 BigBird は、質問応答や要約などのさまざまな NLP タスクのパフォーマンスを大幅に向上させます。私達も ゲノミクスデータへの新しいアプリケーションを提案します。* チップ: - BigBird の注意がどのように機能するかについての詳細な説明については、[このブログ投稿](https://huggingface.co/blog/big-bird) を参照してください。 - BigBird には、**original_full** と **block_sparse** の 2 つの実装が付属しています。シーケンス長が 1024 未満の場合、次を使用します。 **block_sparse** を使用してもメリットがないため、**original_full** を使用することをお勧めします。 - コードは現在、3 ブロックと 2 グローバル ブロックのウィンドウ サイズを使用しています。 - シーケンスの長さはブロック サイズで割り切れる必要があります。 - 現在の実装では **ITC** のみがサポートされています。 - 現在の実装では **num_random_blocks = 0** はサポートされていません - BigBird は絶対位置埋め込みを備えたモデルであるため、通常は入力を右側にパディングすることをお勧めします。 左。 このモデルは、[vasudevgupta](https://huggingface.co/vasudevgupta) によって提供されました。元のコードが見つかる [こちら](https://github.com/google-research/bigbird)。 ## ドキュメント リソース - [テキスト分類タスクガイド](../tasks/sequence_classification) - [トークン分類タスクガイド](../tasks/token_classification) - [質問回答タスク ガイド](../tasks/question_answering) - [因果言語モデリング タスク ガイド](../tasks/language_modeling) - [マスクされた言語モデリング タスク ガイド](../tasks/masked_lang_modeling) - [多肢選択タスク ガイド](../tasks/multiple_choice) ## BigBirdConfig [[autodoc]] BigBirdConfig ## BigBirdTokenizer [[autodoc]] BigBirdTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## BigBirdTokenizerFast [[autodoc]] BigBirdTokenizerFast ## BigBird specific outputs [[autodoc]] models.big_bird.modeling_big_bird.BigBirdForPreTrainingOutput <frameworkcontent> <pt> ## BigBirdModel [[autodoc]] BigBirdModel - forward ## BigBirdForPreTraining [[autodoc]] BigBirdForPreTraining - forward ## BigBirdForCausalLM [[autodoc]] BigBirdForCausalLM - forward ## BigBirdForMaskedLM [[autodoc]] BigBirdForMaskedLM - forward ## BigBirdForSequenceClassification [[autodoc]] BigBirdForSequenceClassification - forward ## BigBirdForMultipleChoice [[autodoc]] BigBirdForMultipleChoice - forward 
## BigBirdForTokenClassification [[autodoc]] BigBirdForTokenClassification - forward ## BigBirdForQuestionAnswering [[autodoc]] BigBirdForQuestionAnswering - forward </pt> <jax> ## FlaxBigBirdModel [[autodoc]] FlaxBigBirdModel - __call__ ## FlaxBigBirdForPreTraining [[autodoc]] FlaxBigBirdForPreTraining - __call__ ## FlaxBigBirdForCausalLM [[autodoc]] FlaxBigBirdForCausalLM - __call__ ## FlaxBigBirdForMaskedLM [[autodoc]] FlaxBigBirdForMaskedLM - __call__ ## FlaxBigBirdForSequenceClassification [[autodoc]] FlaxBigBirdForSequenceClassification - __call__ ## FlaxBigBirdForMultipleChoice [[autodoc]] FlaxBigBirdForMultipleChoice - __call__ ## FlaxBigBirdForTokenClassification [[autodoc]] FlaxBigBirdForTokenClassification - __call__ ## FlaxBigBirdForQuestionAnswering [[autodoc]] FlaxBigBirdForQuestionAnswering - __call__ </jax> </frameworkcontent>
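参考までに、`block_sparse` アテンションで BigBird を使う場合の最小限のスケッチを示します。チェックポイント名と各値は例示であり、上で述べたとおりシーケンス長が 1024 未満の場合は `original_full` の使用が推奨されます。

```python
from transformers import BigBirdTokenizer, BigBirdModel

tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
model = BigBirdModel.from_pretrained(
    "google/bigbird-roberta-base",
    attention_type="block_sparse",  # 短い入力では "original_full" を推奨
    block_size=64,
    num_random_blocks=3,
)

# 長い入力の例(注意点: シーケンス長はブロックサイズで割り切れる必要があります)
inputs = tokenizer("Replace me by any long text. " * 200, return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)
```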
transformers/docs/source/ja/model_doc/big_bird.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/big_bird.md", "repo_id": "transformers", "token_count": 2762 }
270
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLAP ## Overview CLAP モデルは、[Large Scale Contrastive Language-Audio pretraining with feature fusion and keyword-to-caption augmentation](https://arxiv.org/pdf/2211.06687.pdf)、Yusong Wu、Ke Chen、Tianyu Zhang、Yuchen Hui、Taylor Berg-Kirkpatrick、Shlomo Dubnov 著。 CLAP (Contrastive Language-Audio Pretraining) は、さまざまな (音声、テキスト) ペアでトレーニングされたニューラル ネットワークです。タスクに合わせて直接最適化することなく、音声が与えられた場合に最も関連性の高いテキスト スニペットを予測するように指示できます。 CLAP モデルは、SWINTransformer を使用して log-Mel スペクトログラム入力からオーディオ特徴を取得し、RoBERTa モデルを使用してテキスト特徴を取得します。次に、テキストとオーディオの両方の特徴が、同じ次元の潜在空間に投影されます。投影されたオーディオとテキストの特徴の間のドット積が、同様のスコアとして使用されます。 論文の要約は次のとおりです。 *対照学習は、マルチモーダル表現学習の分野で目覚ましい成功を収めています。この論文では、音声データと自然言語記述を組み合わせて音声表現を開発する、対照的な言語音声事前トレーニングのパイプラインを提案します。この目標を達成するために、私たちはまず、さまざまなデータ ソースからの 633,526 個の音声とテキストのペアの大規模なコレクションである LAION-Audio-630K をリリースします。次に、さまざまなオーディオ エンコーダとテキスト エンコーダを考慮して、対照的な言語とオーディオの事前トレーニング モデルを構築します。機能融合メカニズムとキーワードからキャプションへの拡張をモデル設計に組み込んで、モデルが可変長の音声入力を処理できるようにし、パフォーマンスを向上させます。 3 番目に、包括的な実験を実行して、テキストから音声への取得、ゼロショット音声分類、教師付き音声分類の 3 つのタスクにわたってモデルを評価します。結果は、私たちのモデルがテキストから音声への検索タスクにおいて優れたパフォーマンスを達成していることを示しています。オーディオ分類タスクでは、モデルはゼロショット設定で最先端のパフォーマンスを達成し、非ゼロショット設定でもモデルの結果に匹敵するパフォーマンスを得ることができます。 LAION-オーディオ-6* このモデルは、[Younes Belkada](https://huggingface.co/ybelkada) および [Arthur Zucker](https://huggingface.co/ArthurZ) によって提供されました。 元のコードは [こちら](https://github.com/LAION-AI/Clap) にあります。 ## ClapConfig [[autodoc]] ClapConfig - from_text_audio_configs ## ClapTextConfig [[autodoc]] ClapTextConfig ## ClapAudioConfig [[autodoc]] ClapAudioConfig ## ClapFeatureExtractor [[autodoc]] ClapFeatureExtractor ## ClapProcessor [[autodoc]] ClapProcessor ## ClapModel [[autodoc]] ClapModel - forward - get_text_features - get_audio_features ## ClapTextModel [[autodoc]] ClapTextModel - forward ## ClapTextModelWithProjection [[autodoc]] ClapTextModelWithProjection - forward ## ClapAudioModel [[autodoc]] ClapAudioModel - forward ## ClapAudioModelWithProjection [[autodoc]] ClapAudioModelWithProjection - forward
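参考までに、ゼロショット音声分類のために `ClapModel` と `ClapProcessor` を使う場合の最小限のスケッチを示します。音声はダミーの乱数配列で代用しており、チェックポイント名・サンプリングレート・候補テキストはいずれも例示です。

```python
import numpy as np
import torch
from transformers import ClapModel, ClapProcessor

model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

audio = np.random.rand(48_000 * 5)  # 仮の 5 秒分のモノラル音声 (48kHz)
texts = ["Sound of a dog", "Sound of vacuum cleaner"]

inputs = processor(text=texts, audios=audio, sampling_rate=48_000, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)

probs = outputs.logits_per_audio.softmax(dim=-1)  # 音声と各テキストの類似度
print(probs)
```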
transformers/docs/source/ja/model_doc/clap.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clap.md", "repo_id": "transformers", "token_count": 1775 }
271
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeBERTa ## Overview DeBERTa モデルは、Pengcheng He、Xiaodong Liu、Jianfeng Gao、Weizhu Chen によって [DeBERTa: Decoding-enhanced BERT with Disentangled Attendant](https://arxiv.org/abs/2006.03654) で提案されました。Google のモデルに基づいています。 2018年にリリースされたBERTモデルと2019年にリリースされたFacebookのRoBERTaモデル。 これは、もつれた注意を解きほぐし、使用されるデータの半分を使用して強化されたマスク デコーダ トレーニングを備えた RoBERTa に基づいて構築されています。 ロベルタ。 論文の要約は次のとおりです。 *事前トレーニングされたニューラル言語モデルの最近の進歩により、多くの自然言語モデルのパフォーマンスが大幅に向上しました。 言語処理 (NLP) タスク。この論文では、新しいモデル アーキテクチャ DeBERTa (Decoding-enhanced BERT with これは、2 つの新しい技術を使用して BERT モデルと RoBERTa モデルを改善します。 1つ目は、 もつれを解く注意メカニズム。各単語は、その内容をエンコードする 2 つのベクトルを使用して表現され、 単語間の注意の重みは、それらの単語のもつれ解除行列を使用して計算されます。 内容と相対的な位置。 2 番目に、強化されたマスク デコーダを使用して、出力ソフトマックス レイヤを次のように置き換えます。 モデルの事前トレーニング用にマスクされたトークンを予測します。これら 2 つの手法により効率が大幅に向上することを示します。 モデルの事前トレーニングと下流タスクのパフォーマンスの向上。 RoBERTa-Large と比較すると、DeBERTa モデルは半分のレベルでトレーニングされています。 トレーニング データは幅広い NLP タスクで一貫して優れたパフォーマンスを示し、MNLI で +0.9% の改善を達成しました。 (90.2% 対 91.1%)、SQuAD v2.0 では +2.3% (88.4% 対 90.7%)、RACE では +3.6% (83.2% 対 86.8%) でした。 DeBERTa コードと 事前トレーニングされたモデルは https://github.com/microsoft/DeBERTa で公開されます。* このモデルは [DeBERTa](https://huggingface.co/DeBERTa) によって寄稿されました。このモデルの TF 2.0 実装は、 [kamalkraj](https://huggingface.co/kamalkraj) による寄稿。元のコードは [こちら](https://github.com/microsoft/DeBERTa) にあります。 ## Resources DeBERTa を使い始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示される) リソースのリスト。ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 <PipelineTag pipeline="text-classification"/> - DeBERTa を使用して [DeepSpeed を使用して大規模モデルのトレーニングを加速する](https://huggingface.co/blog/accelerate-deepspeed) 方法に関するブログ投稿。 - DeBERTa による [機械学習によるスーパーチャージされた顧客サービス](https://huggingface.co/blog/supercharge-customer-service-with-machine-learning) に関するブログ投稿。 - [`DebertaForSequenceClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)。 - [`TFDebertaForSequenceClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)。 - [テキスト分類タスクガイド](../tasks/sequence_classification) <PipelineTag pipeline="token-classification" /> - [`DebertaForTokenClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)。 - [`TFDebertaForTokenClassification`] は、この [サンプル 
スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)。 - [トークン分類](https://huggingface.co/course/chapter7/2?fw=pt) 🤗 ハグフェイスコースの章。 - 🤗 ハグフェイスコースの [バイトペアエンコーディングのトークン化](https://huggingface.co/course/chapter6/5?fw=pt) の章。 - [トークン分類タスクガイド](../tasks/token_classification) <PipelineTag pipeline="fill-mask"/> - [`DebertaForMaskedLM`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) でサポートされています。 [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)。 - [`TFDebertaForMaskedLM`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/lang-modeling#run_mlmpy) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)。 - [マスクされた言語モデリング](https://huggingface.co/course/chapter7/3?fw=pt) 🤗 顔のハグ コースの章。 - [マスク言語モデリング タスク ガイド](../tasks/masked_language_modeling) <PipelineTag pipeline="question-answering"/> - [`DebertaForQuestionAnswering`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)。 - [`TFDebertaForQuestionAnswering`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)。 - [質問回答](https://huggingface.co/course/chapter7/7?fw=pt) 🤗 ハグフェイスコースの章。 - [質問回答タスク ガイド](../tasks/question_answering) ## DebertaConfig [[autodoc]] DebertaConfig ## DebertaTokenizer [[autodoc]] DebertaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## DebertaTokenizerFast [[autodoc]] DebertaTokenizerFast - build_inputs_with_special_tokens - create_token_type_ids_from_sequences <frameworkcontent> <pt> ## DebertaModel [[autodoc]] DebertaModel - forward ## DebertaPreTrainedModel [[autodoc]] DebertaPreTrainedModel ## DebertaForMaskedLM [[autodoc]] DebertaForMaskedLM - forward ## DebertaForSequenceClassification [[autodoc]] DebertaForSequenceClassification - forward ## DebertaForTokenClassification [[autodoc]] DebertaForTokenClassification - forward ## DebertaForQuestionAnswering [[autodoc]] DebertaForQuestionAnswering - forward </pt> <tf> ## TFDebertaModel [[autodoc]] TFDebertaModel - call ## TFDebertaPreTrainedModel [[autodoc]] TFDebertaPreTrainedModel - call ## TFDebertaForMaskedLM [[autodoc]] TFDebertaForMaskedLM - call ## TFDebertaForSequenceClassification [[autodoc]] TFDebertaForSequenceClassification - call ## TFDebertaForTokenClassification [[autodoc]] TFDebertaForTokenClassification - call ## TFDebertaForQuestionAnswering [[autodoc]] TFDebertaForQuestionAnswering - call </tf> </frameworkcontent>
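参考までに、シーケンス分類のために DeBERTa をロードして推論する最小限のスケッチを示します。チェックポイント名は例示で、分類ヘッドは微調整前のためランダムに初期化されており、出力は意味を持ちません。

```python
import torch
from transformers import AutoTokenizer, DebertaForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaForSequenceClassification.from_pretrained("microsoft/deberta-base", num_labels=2)

inputs = tokenizer("DeBERTa improves BERT with disentangled attention.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_class = logits.argmax(dim=-1).item()
print(predicted_class)
```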
transformers/docs/source/ja/model_doc/deberta.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/deberta.md", "repo_id": "transformers", "token_count": 3598 }
272
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Efficient Inference on CPU このガイドは、CPU上で大規模なモデルの効率的な推論に焦点を当てています。 ## `BetterTransformer` for faster inference 最近、テキスト、画像、および音声モデルのCPU上での高速な推論のために`BetterTransformer`を統合しました。詳細については、この統合に関するドキュメンテーションを[こちら](https://huggingface.co/docs/optimum/bettertransformer/overview)で確認してください。 ## PyTorch JITモード(TorchScript) TorchScriptは、PyTorchコードからシリアライズ可能で最適化可能なモデルを作成する方法です。任意のTorchScriptプログラムは、Python依存性のないプロセスで保存およびロードできます。 デフォルトのイーガーモードと比較して、PyTorchのjitモードは通常、オペレーターフュージョンなどの最適化手法によりモデル推論のパフォーマンスが向上します。 TorchScriptの簡単な紹介については、[PyTorch TorchScriptチュートリアル](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#tracing-modules)を参照してください。 ### JITモードでのIPEXグラフ最適化 Intel® Extension for PyTorchは、Transformersシリーズモデルのjitモードにさらなる最適化を提供します。Intel® Extension for PyTorchをjitモードで使用することを強くお勧めします。Transformersモデルからよく使用されるオペレーターパターンのいくつかは、既にIntel® Extension for PyTorchでjitモードのフュージョンに対応しています。これらのフュージョンパターン(Multi-head-attentionフュージョン、Concat Linear、Linear+Add、Linear+Gelu、Add+LayerNormフュージョンなど)は有効でパフォーマンスが良いです。フュージョンの利点は、ユーザーに透過的に提供されます。分析によれば、最も人気のある質問応答、テキスト分類、トークン分類のNLPタスクの約70%が、これらのフュージョンパターンを使用してFloat32精度とBFloat16混合精度の両方でパフォーマンスの利点を得ることができます。 [IPEXグラフ最適化の詳細情報](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html)を確認してください。 #### IPEX installation: IPEXのリリースはPyTorchに従っています。[IPEXのインストール方法](https://intel.github.io/intel-extension-for-pytorch/)を確認してください。 ### Usage of JIT-mode Trainerで評価または予測のためにJITモードを有効にするには、ユーザーはTrainerコマンド引数に`jit_mode_eval`を追加する必要があります。 <Tip warning={true}> PyTorch >= 1.14.0の場合、jitモードはjit.traceでdict入力がサポートされているため、予測と評価に任意のモデルに利益をもたらす可能性があります。 PyTorch < 1.14.0の場合、jitモードはforwardパラメーターの順序がjit.traceのタプル入力の順序と一致するモデルに利益をもたらす可能性があります(質問応答モデルなど)。jit.traceがタプル入力の順序と一致しない場合、テキスト分類モデルなど、jit.traceは失敗し、これをフォールバックさせるために例外でキャッチしています。ログはユーザーに通知するために使用されます。 </Tip> [Transformers質問応答の使用例](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)を参考にしてください。 - Inference using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--jit_mode_eval </b></pre> - Inference with IPEX using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--use_ipex \</b> <b>--jit_mode_eval</b></pre>
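参考までに、Trainer を使わずに TorchScript(および任意で IPEX)による CPU 推論を試す場合の、仮定を含む最小限のスケッチを示します。チェックポイント名は例示で、`torchscript=True` を指定するとトレースしやすいタプル出力になります。

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"  # 例示用のチェックポイント
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, torchscript=True)
model.eval()

# IPEX がインストールされている場合の追加最適化(任意)
# import intel_extension_for_pytorch as ipex
# model = ipex.optimize(model)

inputs = tokenizer("This movie was great!", return_tensors="pt")
with torch.no_grad():
    traced = torch.jit.trace(model, (inputs["input_ids"], inputs["attention_mask"]))
    traced = torch.jit.freeze(traced)
    logits = traced(inputs["input_ids"], inputs["attention_mask"])[0]

print(logits.softmax(dim=-1))
```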
transformers/docs/source/ja/perf_infer_cpu.md/0
{ "file_path": "transformers/docs/source/ja/perf_infer_cpu.md", "repo_id": "transformers", "token_count": 1977 }
273
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Webサーバー用のパイプラインの使用 <Tip> 推論エンジンの作成は複雑なトピックであり、"最適な"ソリューションはおそらく問題の領域に依存するでしょう。CPUまたはGPUを使用していますか?最低のレイテンシ、最高のスループット、多くのモデルのサポート、または特定のモデルの高度な最適化を望んでいますか? このトピックに取り組むための多くの方法があり、私たちが紹介するのは、おそらく最適なソリューションではないかもしれないが、始めるための良いデフォルトです。 </Tip> 重要なことは、Webサーバーはリクエストを待機し、受信したように扱うシステムであるため、[データセット](pipeline_tutorial#using-pipelines-on-a-dataset)のように、イテレータを使用できることです。 通常、Webサーバーは並列処理(マルチスレッド、非同期など)されて、さまざまなリクエストを同時に処理します。一方、パイプライン(および主にその基礎となるモデル)は並列処理にはあまり適していません。それらは多くのRAMを使用するため、実行中に利用可能なリソースをすべて提供するか、計算集約型のジョブである場合に最適です。 Webサーバーは受信と送信の軽い負荷を処理し、実際の作業を1つのスレッドで処理するようにします。この例では`starlette`を使用します。実際のフレームワークはあまり重要ではありませんが、別のフレームワークを使用している場合は、同じ効果を得るためにコードを調整または変更する必要があるかもしれません。 `server.py`を作成してください: ```py from starlette.applications import Starlette from starlette.responses import JSONResponse from starlette.routing import Route from transformers import pipeline import asyncio async def homepage(request): payload = await request.body() string = payload.decode("utf-8") response_q = asyncio.Queue() await request.app.model_queue.put((string, response_q)) output = await response_q.get() return JSONResponse(output) async def server_loop(q): pipe = pipeline(model="google-bert/bert-base-uncased") while True: (string, response_q) = await q.get() out = pipe(string) await response_q.put(out) app = Starlette( routes=[ Route("/", homepage, methods=["POST"]), ], ) @app.on_event("startup") async def startup_event(): q = asyncio.Queue() app.model_queue = q asyncio.create_task(server_loop(q)) ``` ここから始めることができます: ```bash uvicorn server:app ``` そして、次のようにクエリできます: ```bash curl -X POST -d "test [MASK]" http://localhost:8000/ #[{"score":0.7742936015129089,"token":1012,"token_str":".","sequence":"test."},...] ``` そして、これでウェブサーバーを作成する方法の良いアイデアを持っています! 本当に重要なのは、モデルを**一度だけ**ロードすることです。これにより、ウェブサーバー上にモデルのコピーがないため、不必要なRAMが使用されなくなります。 その後、キューイングメカニズムを使用して、動的バッチ処理を行うなど、いくつかのアイテムを蓄積してから推論を行うなど、高度な処理を行うことができます: <Tip warning={true}> 以下のコードサンプルは、可読性のために擬似コードのように書かれています。システムリソースに合理的かどうかを確認せずに実行しないでください! 
</Tip> ```py (string, rq) = await q.get() strings = [] queues = [] while True: try: (string, rq) = await asyncio.wait_for(q.get(), timeout=0.001) # 1ms except asyncio.exceptions.TimeoutError: break strings.append(string) queues.append(rq) strings outs = pipe(strings, batch_size=len(strings)) for rq, out in zip(queues, outs): await rq.put(out) ``` まず第一に、通常はあまり良いアイデアではないバッチサイズの制限がありません。次に、タイムアウトはキューの取得ごとにリセットされるため、推論を実行する前に1ms以上待つ可能性があります(最初のリクエストの遅延に1ms分遅れが生じます)。 1msの締め切りを1回だけ持つのが良いでしょう。 これは、キューに何もない場合でも常に1ms待機しますが、キューに何もない場合に推論を開始したい場合は適していないかもしれません。ただし、バッチ処理が本当に重要な場合には意味があるかもしれません。再度、1つの最適な解決策は存在しません。 ## Few things you might want to consider ### Error checking 本番環境では多くの問題が発生する可能性があります:メモリ不足、スペース不足、モデルの読み込みが失敗するかもしれません、クエリが誤っているかもしれません、クエリが正しい場合でもモデルの構成エラーのために実行に失敗するかもしれませんなど。 一般的には、サーバーがエラーをユーザーに出力すると良いため、これらのエラーを表示するための多くの`try..except`ステートメントを追加することは良いアイデアです。ただし、セキュリティコンテキストに応じてこれらのエラーをすべて表示することはセキュリティリスクになる可能性があることに注意してください。 ### Circuit breaking Webサーバーは通常、過負荷時に正しいエラーを返す方が良いです。クエリを無期限に待つ代わりに適切なエラーを返します。長時間待つ代わりに503エラーを返すか、長時間待ってから504エラーを返すかです。 提案されたコードでは単一のキューがあるため、キューサイズを見ることは、Webサーバーが負荷に耐える前にエラーを返すための基本的な方法です。 ### Blocking the main thread 現在、PyTorchは非同期を認識していないため、計算はメインスレッドをブロックします。つまり、PyTorchが独自のスレッド/プロセスで実行されるようにすると良いでしょう。提案されたコードは、スレッドと非同期とキューがうまく連携しないため、これは行われていませんが、最終的には同じことを行います。 これは、単一のアイテムの推論が長い場合(>1秒)に重要です。この場合、推論中にすべてのクエリが1秒待たなければならないことを意味します。 ### Dynamic batching 一般的に、バッチ処理は1回のアイテムを1回渡すよりも改善されることは必ずしもありません(詳細は[バッチ処理の詳細](./main_classes/pipelines#pipeline-batching)を参照)。しかし、正しい設定で使用すると非常に効果的です。APIではデフォルトで動的バッチ処理は行われません(遅延の機会が多すぎます)。しかし、非常に大規模なモデルであるBLOOM推論の場合、動的バッチ処理は**重要**です。これにより、すべてのユーザーにとってまともなエクスペリエンスを提供できます。 以上が、提供されたテキストのMarkdown形式の翻訳です。
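To make the ideas above concrete, here is one possible (illustrative, untested) way to combine a queue-size circuit breaker with a capped dynamic batch, reusing the `server.py` structure from earlier. The constants `MAX_QUEUE_SIZE` and `MAX_BATCH_SIZE` are arbitrary values you would tune for your model and hardware:

```python
import asyncio

from starlette.responses import JSONResponse
from transformers import pipeline

MAX_QUEUE_SIZE = 32  # reject requests beyond this backlog (circuit breaking)
MAX_BATCH_SIZE = 8   # upper bound on the dynamic batch


async def homepage(request):
    payload = await request.body()
    string = payload.decode("utf-8")
    q = request.app.model_queue
    if q.qsize() >= MAX_QUEUE_SIZE:
        # Fail fast with a proper error instead of letting latency grow unbounded.
        return JSONResponse({"error": "server overloaded"}, status_code=503)
    response_q = asyncio.Queue()
    await q.put((string, response_q))
    output = await response_q.get()
    return JSONResponse(output)


async def server_loop(q):
    pipe = pipeline(model="google-bert/bert-base-uncased")
    while True:
        # Always wait for at least one item, then greedily drain up to MAX_BATCH_SIZE.
        (string, response_q) = await q.get()
        strings, queues = [string], [response_q]
        while len(strings) < MAX_BATCH_SIZE:
            try:
                (string, response_q) = await asyncio.wait_for(q.get(), timeout=0.001)
            except asyncio.TimeoutError:
                break
            strings.append(string)
            queues.append(response_q)
        outs = pipe(strings, batch_size=len(strings))
        for response_q, out in zip(queues, outs):
            await response_q.put(out)
```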
transformers/docs/source/ja/pipeline_webserver.md/0
{ "file_path": "transformers/docs/source/ja/pipeline_webserver.md", "repo_id": "transformers", "token_count": 3402 }
274
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # How 🤗 Transformers solve tasks [🤗 Transformersでできること](task_summary)で、自然言語処理(NLP)、音声とオーディオ、コンピュータビジョンのタスク、それらの重要なアプリケーションについて学びました。このページでは、モデルがこれらのタスクをどのように解決するかを詳しく見て、モデルの内部で何が起こっているかを説明します。特定のタスクを解決するためには多くの方法があり、一部のモデルは特定のテクニックを実装するか、または新しい観点からタスクに取り組むかもしれませんが、Transformerモデルにとって、一般的なアイデアは同じです。柔軟なアーキテクチャのおかげで、ほとんどのモデルはエンコーダ、デコーダ、またはエンコーダ-デコーダ構造の変種です。Transformerモデル以外にも、当社のライブラリにはコンピュータビジョンタスクに今でも使用されているいくつかの畳み込みニューラルネットワーク(CNN)もあります。また、現代のCNNがどのように機能するかも説明します。 タスクがどのように解決されるかを説明するために、モデル内部で有用な予測を出力するために何が起こるかについて説明します。 - [Wav2Vec2](model_doc/wav2vec2):オーディオ分類および自動音声認識(ASR)向け - [Vision Transformer(ViT)](model_doc/vit)および[ConvNeXT](model_doc/convnext):画像分類向け - [DETR](model_doc/detr):オブジェクト検出向け - [Mask2Former](model_doc/mask2former):画像セグメンテーション向け - [GLPN](model_doc/glpn):深度推定向け - [BERT](model_doc/bert):エンコーダを使用するテキスト分類、トークン分類、および質問応答などのNLPタスク向け - [GPT2](model_doc/gpt2):デコーダを使用するテキスト生成などのNLPタスク向け - [BART](model_doc/bart):エンコーダ-デコーダを使用する要約および翻訳などのNLPタスク向け <Tip> さらに進む前に、元のTransformerアーキテクチャの基本的な知識を持つと良いです。エンコーダ、デコーダ、および注意力がどのように動作するかを知っておくと、異なるTransformerモデルがどのように動作するかを理解するのに役立ちます。始めているか、リフレッシュが必要な場合は、詳細な情報については当社の[コース](https://huggingface.co/course/chapter1/4?fw=pt)をチェックしてください! </Tip> ## Speech and audio [Wav2Vec2](model_doc/wav2vec2)は、未ラベルの音声データで事前トレーニングされ、オーディオ分類および自動音声認識のラベル付きデータでファインチューンされた自己教師モデルです。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/wav2vec2_architecture.png"/> </div> このモデルには主に次の4つのコンポーネントがあります。 1. *特徴エンコーダ*:生の音声波形を受け取り、平均値をゼロに正規化し、単位分散に変換し、それを20msごとの特徴ベクトルのシーケンスに変換します。 2. 波形は自然に連続しているため、テキストのシーケンスを単語に分割できるようにできるように、特徴ベクトルは*量子化モジュール*に渡され、離散音声ユニットを学習しようとします。音声ユニットは*コードブック*(語彙と考えることができます)として知られるコードワードのコレクションから選択されます。コードブックから、連続したオーディオ入力を最もよく表すベクトルまたは音声ユニット(ターゲットラベルと考えることができます)が選択され、モデルを介して転送されます。 3. 特徴ベクトルの約半分はランダムにマスクされ、マスクされた特徴ベクトルは*コンテキストネットワーク*に供給されます。これは、相対的な位置エンベッディングも追加するTransformerエンコーダです。 4. コンテキストネットワークの事前トレーニングの目的は*コントラスティブタスク*です。モデルはマスクされた予測の真の量子化音声表現を、偽の予測のセットから予測しなければならず、モデルは最も似たコンテキストベクトルと量子化音声ユニット(ターゲットラベル)を見つけるように促されます。 今、Wav2Vec2は事前トレーニングされているので、オーディオ分類または自動音声認識のためにデータをファインチューンできます! ### Audio classification 事前トレーニングされたモデルをオーディオ分類に使用するには、基本的なWav2Vec2モデルの上にシーケンス分類ヘッドを追加します。分類ヘッドはエンコーダの隠れた状態を受け入れる線形層で、各オーディオフレームから学習された特徴を表します。これらの隠れた状態は長さが異なる可能性があるため、最初に隠れた状態がプールされ、次にクラスラベルに対するロジットに変換されます。ロジットとターゲット間のクロスエントロピー損失が計算され、最も可能性の高いクラスを見つけるために使用されます。 オーディオ分類を試す準備はできましたか?Wav2Vec2をファインチューンして推論に使用する方法を学ぶための完全な[オーディオ分類ガイド](tasks/audio_classification)をチェックしてください! 
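As a simplified, illustrative sketch of the head just described (the real `Wav2Vec2ForSequenceClassification` head in Transformers has a few more details), pooling the frame-level hidden states and projecting them to class logits looks roughly like this:

```python
import torch
import torch.nn as nn

batch, frames, hidden_size, num_labels = 2, 49, 768, 5

hidden_states = torch.randn(batch, frames, hidden_size)  # encoder output: one vector per audio frame
classifier = nn.Linear(hidden_size, num_labels)           # the classification head

pooled = hidden_states.mean(dim=1)                        # pool over the variable-length time axis
logits = classifier(pooled)                               # (batch, num_labels)

labels = torch.tensor([3, 1])
loss = nn.functional.cross_entropy(logits, labels)        # cross-entropy against the target classes
```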
### Automatic speech recognition 事前トレーニングされたモデルを自動音声認識に使用するには、[connectionist temporal classification(CTC)](glossary#connectionist-temporal-classification-ctc)のための基本的なWav2Vec2モデルの上に言語モデリングヘッドを追加します。言語モデリングヘッドはエンコーダの隠れた状態を受け入れ、それらをロジットに変換します。各ロジットはトークンクラスを表し(トークン数はタスクの語彙から来ます)、ロジットとターゲット間のCTC損失が計算され、次に転写に変換されます。 自動音声認識を試す準備はできましたか?Wav2Vec2をファインチューンして推論に使用する方法を学ぶための完全な[自動音声認識ガイド](tasks/asr)をチェックしてください! ## Computer vision コンピュータビジョンのタスクをアプローチする方法は2つあります。 1. 画像をパッチのシーケンスに分割し、Transformerを使用して並列に処理します。 2. [ConvNeXT](model_doc/convnext)などのモダンなCNNを使用します。これらは畳み込み層を使用しますが、モダンなネットワーク設計を採用しています。 <Tip> サードアプローチでは、Transformerと畳み込みを組み合わせたものもあります(例:[Convolutional Vision Transformer](model_doc/cvt)または[LeViT](model_doc/levit))。これらについては議論しませんが、これらはここで調べる2つのアプローチを組み合わせています。 </Tip> ViTとConvNeXTは画像分類によく使用されますが、オブジェクト検出、セグメンテーション、深度推定などの他のビジョンタスクに対しては、DETR、Mask2Former、GLPNなどが適しています。 ### Image classification ViTとConvNeXTの両方を画像分類に使用できます。主な違いは、ViTが注意メカニズムを使用し、ConvNeXTが畳み込みを使用することです。 #### Transformer [ViT](model_doc/vit)は畳み込みを完全にTransformerアーキテクチャで置き換えます。元のTransformerに精通している場合、ViTの理解は既にほとんど完了しています。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg"/> </div> ViTが導入した主な変更点は、画像をTransformerに供給する方法です。 1. 画像は正方形で重ならないパッチのシーケンスに分割され、各パッチはベクトルまたは*パッチ埋め込み*に変換されます。パッチ埋め込みは、適切な入力次元を作成するために2D畳み込み層から生成されます(基本のTransformerの場合、各パッチ埋め込みに768の値があります)。224x224ピクセルの画像がある場合、それを16x16の画像パッチに分割できます。テキストが単語にトークン化されるように、画像はパッチのシーケンスに「トークン化」されます。 2. *学習埋め込み*、つまり特別な `[CLS]` トークンが、BERTのようにパッチ埋め込みの先頭に追加されます。 `[CLS]` トークンの最終的な隠れた状態は、付属の分類ヘッドの入力として使用されます。他の出力は無視されます。このトークンは、モデルが画像の表現をエンコードする方法を学ぶのに役立ちます。 3. パッチと学習埋め込みに追加する最後の要素は*位置埋め込み*です。モデルは画像パッチがどのように並べられているかを知りませんので、位置埋め込みも学習可能で、パッチ埋め込みと同じサイズを持ちます。最後に、すべての埋め込みがTransformerエンコーダに渡されます。 4. 出力、具体的には `[CLS]` トークンの出力だけが、多層パーセプトロンヘッド(MLP)に渡されます。ViTの事前トレーニングの目的は単純に分類です。他の分類ヘッドと同様に、MLPヘッドは出力をクラスラベルに対するロジットに変換し、クロスエントロピー損失を計算して最も可能性の高いクラスを見つけます。 画像分類を試す準備はできましたか?ViTをファインチューンして推論に使用する方法を学ぶための完全な[画像分類ガイド](tasks/image_classification)をチェックしてください! #### CNN <Tip> このセクションでは畳み込みについて簡単に説明していますが、画像の形状とサイズがどのように変化するかを事前に理解していると役立ちます。畳み込みに慣れていない場合は、fastaiの書籍から[Convolution Neural Networks chapter](https://github.com/fastai/fastbook/blob/master/13_convolutions.ipynb)をチェックしてみてください! </Tip> [ConvNeXT](model_doc/convnext)は、性能を向上させるために新しいモダンなネットワーク設計を採用したCNNアーキテクチャです。ただし、畳み込みはモデルの中核にまだあります。高レベルから見た場合、[畳み込み(convolution)](glossary#convolution)は、小さな行列(*カーネル*)が画像のピクセルの小さなウィンドウに乗算される操作です。それは特定のテクスチャや線の曲率などの特徴を計算します。その後、次のピクセルのウィンドウに移動します。畳み込みが移動する距離は*ストライド*として知られています。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convolution.gif"/> </div> <small>[Convolution Arithmetic for Deep Learning](https://arxiv.org/abs/1603.07285) からの基本的なパディングやストライドのない畳み込み。</small> この出力を別の畳み込み層に供給し、各連続した層ごとに、ネットワークはホットドッグやロケットのようなより複雑で抽象的なものを学習します。畳み込み層の間には、特徴の次元を削減し、特徴の位置の変動に対してモデルをより堅牢にするためにプーリング層を追加するのが一般的です。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.png"/> </div> ConvNeXTは、以下の5つの方法でCNNをモダン化しています。 1. 各ステージのブロック数を変更し、画像をより大きなストライドと対応するカーネルサイズで*パッチ化*します。重ならないスライディングウィンドウは、これにより画像をパッチに分割するViTの戦略と似ています。 2. *ボトルネック* レイヤーはチャネル数を縮小し、それを復元します。1x1の畳み込みを実行するのは速く、深さを増やすことができます。逆ボトルネックは逆のことを行い、チャネル数を拡張し、それを縮小します。これはメモリ効率が高いです。 3. ボトルネックレイヤー内の通常の3x3の畳み込み層を、*深度方向の畳み込み*で置き換えます。これは各入力チャネルに個別に畳み込みを適用し、最後にそれらを積み重ねる畳み込みです。これにより、性能向上のためにネットワーク幅が広がります。 4. 
ViTはグローバル受容野を持っているため、その注意メカニズムのおかげで一度に画像の多くを見ることができます。ConvNeXTはこの効果を再現しようとし、カーネルサイズを7x7に増やします。 5. ConvNeXTはまた、Transformerモデルを模倣するいくつかのレイヤーデザイン変更を行っています。アクティベーションと正規化レイヤーが少なく、活性化関数はReLUの代わりにGELUに切り替え、BatchNormの代わりにLayerNormを使用しています。 畳み込みブロックからの出力は、分類ヘッドに渡され、出力をロジットに変換し、最も可能性の高いラベルを見つけるためにクロスエントロピー損失が計算されます。 ### Object detection [DETR](model_doc/detr)、*DEtection TRansformer*、はCNNとTransformerエンコーダーデコーダーを組み合わせたエンドツーエンドのオブジェクト検出モデルです。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/detr_architecture.png"/> </div> 1. 事前トレーニングされたCNN *バックボーン* は、ピクセル値で表される画像を受け取り、それの低解像度の特徴マップを作成します。特徴マップには次元削減のために1x1の畳み込みが適用され、高レベルの画像表現を持つ新しい特徴マップが作成されます。Transformerは連続モデルであるため、特徴マップは特徴ベクトルのシーケンスに平坦化され、位置エンベディングと組み合わせられます。 2. 特徴ベクトルはエンコーダーに渡され、その注意レイヤーを使用して画像表現を学習します。次に、エンコーダーの隠れ状態はデコーダーの*オブジェクトクエリ*と組み合わされます。オブジェクトクエリは、画像の異なる領域に焦点を当てる学習埋め込みで、各注意レイヤーを進行するにつれて更新されます。デコーダーの隠れ状態は、各オブジェクトクエリに対してバウンディングボックスの座標とクラスラベルを予測するフィードフォワードネットワークに渡されます。または、存在しない場合は `no object` が渡されます。 DETRは各オブジェクトクエリを並行してデコードして、*N*の最終的な予測(*N*はクエリの数)を出力します。典型的な自己回帰モデルが1つの要素を1回ずつ予測するのとは異なり、オブジェクト検出はセット予測タスク(`バウンディングボックス`、`クラスラベル`)であり、1回のパスで*N*の予測を行います。 3. 訓練中、DETRは*二部マッチング損失*を使用して、固定された数の予測と固定された一連の正解ラベルを比較します。 *N*のラベルセットに正解ラベルが少ない場合、 `no object` クラスでパディングされます。この損失関数は、DETRに予測と正解ラベルとの間で1対1の割り当てを見つけるように促します。バウンディングボックスまたはクラスラベルのどちらかが正しくない場合、損失が発生します。同様に、DETRが存在しないオブジェクトを予測した場合、罰金が科せられます。これにより、DETRは1つの非常に顕著なオブジェクトに焦点を当てるのではなく、画像内の他のオブジェクトを見つけるように促されます。 DETRの上にオブジェクト検出ヘッドを追加して、クラスラベルとバウンディングボックスの座標を見つけます。オブジェクト検出ヘッドには2つのコンポーネントがあります:デコーダーの隠れ状態をクラスラベルのロジットに変換するための線形層、およびバウンディングボックスを予測するためのMLPです。 オブジェクト検出を試す準備はできましたか?DETROの完全な[オブジェクト検出ガイド](tasks/object_detection)をチェックして、DETROのファインチューニング方法と推論方法を学んでください! ### Image segmentation [Mask2Former](model_doc/mask2former)は、すべての種類の画像セグメンテーションタスクを解決するためのユニバーサルアーキテクチャです。従来のセグメンテーションモデルは通常、インスタンス、セマンティック、またはパノプティックセグメンテーションの特定のサブタスクに合わせて設計されています。Mask2Formerは、それらのタスクのそれぞれを*マスク分類*の問題として捉えます。マスク分類はピクセルを*N*のセグメントにグループ化し、与えられた画像に対して*N*のマスクとそれに対応するクラスラベルを予測します。このセクションでは、Mask2Formerの動作方法を説明し、最後にSegFormerのファインチューニングを試すことができます。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/mask2former_architecture.png"/> </div> Mask2Formerの主要なコンポーネントは次の3つです。 1. [Swin](model_doc/swin)バックボーンは画像を受け入れ、3つの連続する3x3の畳み込みから低解像度の画像特徴マップを作成します。 2. 特徴マップは*ピクセルデコーダー*に渡され、低解像度の特徴を高解像度のピクセル埋め込みに徐々にアップサンプリングします。ピクセルデコーダーは実際には解像度1/32、1/16、および1/8のオリジナル画像のマルチスケール特徴(低解像度と高解像度の特徴を含む)を生成します。 3. これらの異なるスケールの特徴マップのそれぞれは、高解像度の特徴から小さいオブジェクトをキャプチャするために1回ずつトランスフォーマーデコーダーレイヤーに渡されます。Mask2Formerの要点は、デコーダーの*マスクアテンション*メカニズムです。クロスアテンションが画像全体に注意を向けることができるのに対し、マスクアテンションは画像の特定の領域にのみ焦点を当てます。これは速く、ローカルな画像特徴だけでもモデルが学習できるため、パフォーマンスが向上します。 4. [DETR](tasks_explained#object-detection)と同様に、Mask2Formerも学習されたオブジェクトクエリを使用し、画像の特徴と組み合わせてセットの予測(`クラスラベル`、`マスク予測`)を行います。デコーダーの隠れ状態は線形層に渡され、クラスラベルに対するロジットに変換されます。ロジットと正解ラベル間のクロスエントロピー損失が最も可能性の高いものを見つけます。 マスク予測は、ピクセル埋め込みと最終的なデコーダーの隠れ状態を組み合わせて生成されます。シグモイドクロスエントロピーやダイス損失がロジットと正解マスクの間で最も可能性の高いマスクを見つけます。 セグメンテーションタスクに取り組む準備ができましたか?SegFormerのファインチューニング方法と推論方法を学ぶために、完全な[画像セグメンテーションガイド](tasks/semantic_segmentation)をチェックしてみてください! ### Depth estimation [GLPN](model_doc/glpn)、*Global-Local Path Network*、はセグメンテーションまたは深度推定などの密な予測タスクに適しています。[SegFormer](model_doc/segformer)エンコーダーを軽量デコーダーと組み合わせたTransformerベースの深度推定モデルです。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/glpn_architecture.jpg"/> </div> 1. 
ViTのように、画像はパッチのシーケンスに分割されますが、これらの画像パッチは小さいです。これはセグメンテーションや深度推定などの密な予測タスクに適しています。画像パッチはパッチ埋め込みに変換されます(パッチ埋め込みの作成方法の詳細については、[画像分類](#image-classification)セクションを参照してください)。これらのパッチ埋め込みはエンコーダーに渡されます。 2. エンコーダーはパッチ埋め込みを受け入れ、複数のエンコーダーブロックを通じてそれらを渡します。各ブロックにはアテンションとMix-FFNレイヤーが含まれています。後者の役割は位置情報を提供することです。各エンコーダーブロックの最後には、階層的表現を作成するための*パッチマージング*レイヤーがあります。隣接するパッチのグループごとの特徴が連結され、連結された特徴に対して線形層が適用され、パッチの数を1/4の解像度に削減します。これが次のエンコーダーブロックへの入力となり、ここではこのプロセス全体が繰り返され、元の画像の1/8、1/16、および1/32の解像度の画像特徴が得られます。 3. 軽量デコーダーは、エンコーダーからの最後の特徴マップ(1/32スケール)を受け取り、それを1/16スケールにアップサンプリングします。その後、特徴は各特徴に対するアテンションマップからローカルとグローバルな特徴を選択して組み合わせる*セレクティブフィーチャーフュージョン(SFF)*モジュールに渡され、1/8にアップサンプリングされます。このプロセスはデコードされた特徴が元の画像と同じサイズになるまで繰り返されます。 4. デコードされた特徴は、最終的な予測を行うためにセマンティックセグメンテーション、深度推定、またはその他の密な予測タスクに供給されます。セマンティックセグメンテーションの場合、特徴はクラス数に対するロジットに変換され、クロスエントロピー損失を使用して最適化されます。深度推定の場合、特徴は深度マップに変換され、平均絶対誤差(MAE)または平均二乗誤差(MSE)損失が使用されます。 ## Natural language processing Transformerは最初に機械翻訳のために設計され、それ以降、ほとんどのNLPタスクを解決するためのデフォルトのアーキテクチャとなっています。一部のタスクはTransformerのエンコーダー構造に適しており、他のタスクはデコーダーに適しています。さらに、一部のタスクではTransformerのエンコーダー-デコーダー構造を使用します。 ### Text classification [BERT](model_doc/bert)はエンコーダーのみのモデルであり、テキストの豊かな表現を学習するために両側の単語に注意を払うことで、深い双方向性を効果的に実装した最初のモデルです。 1. BERTは[WordPiece](tokenizer_summary#wordpiece)トークナイゼーションを使用してテキストのトークン埋め込みを生成します。単一の文と文のペアを区別するために、特別な `[SEP]` トークンが追加されます。 `[CLS]` トークンはすべてのテキストシーケンスの先頭に追加されます。 `[CLS]` トークンとともに最終出力は、分類タスクのための入力として使用されます。BERTはまた、トークンが文のペアの最初または2番目の文に属するかどうかを示すセグメント埋め込みを追加します。 2. BERTは、事前トレーニングで2つの目標を使用します:マスクされた言語モデリングと次の文の予測です。マスクされた言語モデリングでは、入力トークンの一部がランダムにマスクされ、モデルはこれらを予測する必要があります。これにより、モデルが全ての単語を見て「次の単語」を予測することができる双方向性の問題が解決されます。予測されたマスクトークンの最終的な隠れた状態は、ソフトマックスを使用した単語のマスクを予測するためのフィードフォワードネットワークに渡されます。 2番目の事前トレーニングオブジェクトは次の文の予測です。モデルは文Aの後に文Bが続くかどうかを予測する必要があります。半分の場合、文Bは次の文であり、残りの半分の場合、文Bはランダムな文です。予測(次の文かどうか)は、2つのクラス(`IsNext`および`NotNext`)に対するソフトマックスを持つフィードフォワードネットワークに渡されます。 3. 入力埋め込みは、最終的な隠れた状態を出力するために複数のエンコーダーレイヤーを介して渡されます。 事前訓練済みモデルをテキスト分類に使用するには、ベースのBERTモデルの上にシーケンス分類ヘッドを追加します。シーケンス分類ヘッドは最終的な隠れた状態を受け入れ、それらをロジットに変換するための線形層です。クロスエントロピー損失は、ロジットとターゲット間で最も可能性の高いラベルを見つけるために計算されます。 テキスト分類を試してみる準備はできましたか?DistilBERTを微調整し、推論に使用する方法を学ぶために、完全な[テキスト分類ガイド](tasks/sequence_classification)をチェックしてみてください! ### Token classification BERTを名前エンティティ認識(NER)などのトークン分類タスクに使用するには、ベースのBERTモデルの上にトークン分類ヘッドを追加します。トークン分類ヘッドは最終的な隠れた状態を受け入れ、それらをロジットに変換するための線形層です。クロスエントロピー損失は、ロジットと各トークン間で最も可能性の高いラベルを見つけるために計算されます。 トークン分類を試してみる準備はできましたか?DistilBERTを微調整し、推論に使用する方法を学ぶために、完全な[トークン分類ガイド](tasks/token_classification)をチェックしてみてください! ### Question answering BERTを質問応答に使用するには、ベースのBERTモデルの上にスパン分類ヘッドを追加します。この線形層は最終的な隠れた状態を受け入れ、回答に対応するテキストの「スパン」開始と終了のロジットを計算します。クロスエントロピー損失は、ロジットとラベル位置との間で最も可能性の高いテキストスパンを見つけるために計算されます。 質問応答を試してみる準備はできましたか?DistilBERTを微調整し、推論に使用する方法を学ぶために、完全な[質問応答ガイド](tasks/question_answering)をチェックしてみてください! <Tip> 💡 注意してください。一度事前トレーニングが完了したBERTを使用してさまざまなタスクに簡単に適用できることに注目してください。必要なのは、事前トレーニング済みモデルに特定のヘッドを追加して、隠れた状態を所望の出力に変換することだけです! </Tip> ### Text generation [GPT-2](model_doc/gpt2)は大量のテキストで事前トレーニングされたデコーダー専用モデルです。プロンプトを与えると説得力のあるテキストを生成し、明示的にトレーニングされていないにもかかわらず、質問応答などの他のNLPタスクも完了できます。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gpt2_architecture.png"/> </div> 1. 
GPT-2は[バイトペアエンコーディング(BPE)](tokenizer_summary#bytepair-encoding-bpe)を使用して単語をトークナイズし、トークン埋め込みを生成します。位置エンコーディングがトークン埋め込みに追加され、各トークンの位置を示します。入力埋め込みは複数のデコーダーブロックを介して最終的な隠れた状態を出力するために渡されます。各デコーダーブロック内で、GPT-2は「マスクされた自己注意」レイヤーを使用します。これは、GPT-2が未来のトークンに注意を払うことはできないことを意味します。GPT-2は左側のトークンにのみ注意を払うことが許可されています。これはBERTの[`mask`]トークンとは異なり、マスクされた自己注意では未来のトークンに対してスコアを`0`に設定するための注意マスクが使用されます。 2. デコーダーからの出力は、言語モデリングヘッドに渡され、最終的な隠れた状態をロジットに変換するための線形変換を実行します。ラベルはシーケンス内の次のトークンであり、これはロジットを右に1つずらして生成されます。クロスエントロピー損失は、シフトされたロジットとラベル間で計算され、次に最も可能性の高いトークンを出力します。 GPT-2の事前トレーニングの目標は完全に[因果言語モデリング](glossary#causal-language-modeling)に基づいており、シーケンス内の次の単語を予測します。これにより、GPT-2はテキスト生成を含むタスクで特に優れた性能を発揮します。 テキスト生成を試してみる準備はできましたか?DistilGPT-2を微調整し、推論に使用する方法を学ぶために、完全な[因果言語モデリングガイド](tasks/language_modeling#causal-language-modeling)をチェックしてみてください! <Tip> テキスト生成に関する詳細は、[テキスト生成戦略](generation_strategies)ガイドをチェックしてみてください! </Tip> ### Summarization [BART](model_doc/bart) や [T5](model_doc/t5) のようなエンコーダーデコーダーモデルは、要約タスクのシーケンス・トゥ・シーケンス・パターンに設計されています。このセクションでは、BARTの動作方法を説明し、最後にT5の微調整を試すことができます。 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bart_architecture.png"/> </div> 1. BARTのエンコーダーアーキテクチャは、BERTと非常に似ており、テキストのトークンと位置エンベディングを受け入れます。BARTは、入力を破壊してからデコーダーで再構築することによって事前トレーニングされます。特定の破壊戦略を持つ他のエンコーダーとは異なり、BARTは任意の種類の破壊を適用できます。ただし、*テキストインフィリング*破壊戦略が最適です。テキストインフィリングでは、いくつかのテキストスパンが**単一の** [`mask`] トークンで置き換えられます。これは重要です、なぜならモデルはマスクされたトークンを予測しなければならず、モデルに欠落トークンの数を予測させるからです。入力埋め込みとマスクされたスパンはエンコーダーを介して最終的な隠れた状態を出力しますが、BERTとは異なり、BARTは単語を予測するための最終的なフィードフォワードネットワークを最後に追加しません。 2. エンコーダーの出力はデコーダーに渡され、デコーダーはエンコーダーの出力からマスクされたトークンと非破壊トークンを予測する必要があります。これにより、デコーダーは元のテキストを復元するのに役立つ追加のコンテキストが提供されます。デコーダーからの出力は言語モデリングヘッドに渡され、隠れた状態をロジットに変換するための線形変換を実行します。クロスエントロピー損失は、ロジットとラベルの間で計算され、ラベルは単に右にシフトされたトークンです。 要約を試す準備はできましたか?T5を微調整して推論に使用する方法を学ぶために、完全な[要約ガイド](tasks/summarization)をご覧ください! <Tip> テキスト生成に関する詳細は、[テキスト生成戦略](generation_strategies)ガイドをチェックしてみてください! </Tip> ### Translation 翻訳は、もう一つのシーケンス・トゥ・シーケンス・タスクの例であり、[BART](model_doc/bart) や [T5](model_doc/t5) のようなエンコーダーデコーダーモデルを使用して実行できます。このセクションでは、BARTの動作方法を説明し、最後にT5の微調整を試すことができます。 BARTは、ソース言語をターゲット言語にデコードできるようにするために、別個にランダムに初期化されたエンコーダーを追加することで翻訳に適応します。この新しいエンコーダーの埋め込みは、元の単語埋め込みの代わりに事前トレーニング済みのエンコーダーに渡されます。ソースエンコーダーは、モデルの出力からのクロスエントロピー損失を用いてソースエンコーダー、位置エンベディング、および入力エンベディングを更新することによって訓練されます。この最初のステップではモデルパラメータが固定され、すべてのモデルパラメータが2番目のステップで一緒に訓練されます。 その後、翻訳のために多言語版のmBARTが登場し、多言語で事前トレーニングされたモデルとして利用可能です。 翻訳を試す準備はできましたか?T5を微調整して推論に使用する方法を学ぶために、完全な[翻訳ガイド](tasks/summarization)をご覧ください! <Tip> テキスト生成に関する詳細は、[テキスト生成戦略](generation_strategies)ガイドをチェックしてみてください! </Tip>
transformers/docs/source/ja/tasks_explained.md/0
{ "file_path": "transformers/docs/source/ja/tasks_explained.md", "repo_id": "transformers", "token_count": 16553 }
275
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # AutoClass로 사전 학습된 인스턴스 로드[[load-pretrained-instances-with-an-autoclass]] 트랜스포머 아키텍처가 매우 다양하기 때문에 체크포인트에 맞는 아키텍처를 생성하는 것이 어려울 수 있습니다. 라이브러리를 쉽고 간단하며 유연하게 사용하기 위한 Transformer 핵심 철학의 일환으로, `AutoClass`는 주어진 체크포인트에서 올바른 아키텍처를 자동으로 추론하여 로드합니다. `from_pretrained()` 메서드를 사용하면 모든 아키텍처에 대해 사전 학습된 모델을 빠르게 로드할 수 있으므로 모델을 처음부터 학습하는 데 시간과 리소스를 투입할 필요가 없습니다. 체크포인트에 구애받지 않는 코드를 생성한다는 것은 코드가 한 체크포인트에서 작동하면 아키텍처가 다르더라도 다른 체크포인트(유사한 작업에 대해 학습된 경우)에서도 작동한다는 것을 의미합니다. <Tip> 아키텍처는 모델의 골격을 의미하며 체크포인트는 주어진 아키텍처에 대한 가중치입니다. 예를 들어, [BERT](https://huggingface.co/google-bert/bert-base-uncased)는 아키텍처이고, `google-bert/bert-base-uncased`는 체크포인트입니다. 모델은 아키텍처 또는 체크포인트를 의미할 수 있는 일반적인 용어입니다. </Tip> 이 튜토리얼에서는 다음을 학습합니다: * 사전 학습된 토크나이저 로드하기. * 사전 학습된 이미지 프로세서 로드하기. * 사전 학습된 특징 추출기 로드하기. * 사전 훈련된 프로세서 로드하기. * 사전 학습된 모델 로드하기. ## AutoTokenizer[[autotokenizer]] 거의 모든 NLP 작업은 토크나이저로 시작됩니다. 토크나이저는 사용자의 입력을 모델에서 처리할 수 있는 형식으로 변환합니다. [`AutoTokenizer.from_pretrained`]로 토크나이저를 로드합니다: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") ``` 그리고 아래와 같이 입력을 토큰화합니다: ```py >>> sequence = "In a hole in the ground there lived a hobbit." >>> print(tokenizer(sequence)) {'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` ## AutoImageProcessor[[autoimageprocessor]] 비전 작업의 경우 이미지 프로세서가 이미지를 올바른 입력 형식으로 처리합니다. ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` ## AutoFeatureExtractor[[autofeatureextractor]] 오디오 작업의 경우 특징 추출기가 오디오 신호를 올바른 입력 형식으로 처리합니다. [`AutoFeatureExtractor.from_pretrained`]로 특징 추출기를 로드합니다: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained( ... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" ... ) ``` ## AutoProcessor[[autoprocessor]] 멀티모달 작업에는 두 가지 유형의 전처리 도구를 결합한 프로세서가 필요합니다. 예를 들어 LayoutLMV2 모델에는 이미지를 처리하는 이미지 프로세서와 텍스트를 처리하는 토크나이저가 필요하며, 프로세서는 이 두 가지를 결합합니다. [`AutoProcessor.from_pretrained()`]로 프로세서를 로드합니다: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") ``` ## AutoModel[[automodel]] <frameworkcontent> <pt> 마지막으로 AutoModelFor클래스를 사용하면 주어진 작업에 대해 미리 학습된 모델을 로드할 수 있습니다 (사용 가능한 작업의 전체 목록은 [여기](model_doc/auto)를 참조하세요). 
예를 들어, [`AutoModelForSequenceClassification.from_pretrained`]를 사용하여 시퀀스 분류용 모델을 로드할 수 있습니다: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 동일한 체크포인트를 쉽게 재사용하여 다른 작업에 아키텍처를 로드할 수 있습니다: ```py >>> from transformers import AutoModelForTokenClassification >>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip warning={true}> PyTorch모델의 경우 `from_pretrained()` 메서드는 내부적으로 피클을 사용하여 안전하지 않은 것으로 알려진 `torch.load()`를 사용합니다. 일반적으로 신뢰할 수 없는 소스에서 가져왔거나 변조되었을 수 있는 모델은 로드하지 마세요. 허깅 페이스 허브에서 호스팅되는 공개 모델의 경우 이러한 보안 위험이 부분적으로 완화되며, 각 커밋 시 멀웨어를 [검사합니다](https://huggingface.co/docs/hub/security-malware). GPG를 사용해 서명된 [커밋 검증](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg)과 같은 모범사례는 [문서](https://huggingface.co/docs/hub/security)를 참조하세요. 텐서플로우와 Flax 체크포인트는 영향을 받지 않으며, `from_pretrained`메서드에 `from_tf` 와 `from_flax` 키워드 가변 인자를 사용하여 이 문제를 우회할 수 있습니다. </Tip> 일반적으로 AutoTokenizer 클래스와 AutoModelFor 클래스를 사용하여 미리 학습된 모델 인스턴스를 로드하는 것이 좋습니다. 이렇게 하면 매번 올바른 아키텍처를 로드할 수 있습니다. 다음 [튜토리얼](preprocessing)에서는 새롭게 로드한 토크나이저, 이미지 프로세서, 특징 추출기를 사용하여 미세 튜닝용 데이터 세트를 전처리하는 방법에 대해 알아봅니다. </pt> <tf> 마지막으로 `TFAutoModelFor` 클래스를 사용하면 주어진 작업에 대해 사전 훈련된 모델을 로드할 수 있습니다. (사용 가능한 작업의 전체 목록은 [여기](model_doc/auto)를 참조하세요. 예를 들어, [`TFAutoModelForSequenceClassification.from_pretrained`]로 시퀀스 분류를 위한 모델을 로드합니다: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 쉽게 동일한 체크포인트를 재사용하여 다른 작업에 아키텍처를 로드할 수 있습니다: ```py >>> from transformers import TFAutoModelForTokenClassification >>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 일반적으로, `AutoTokenizer`클래스와 `TFAutoModelFor` 클래스를 사용하여 미리 학습된 모델 인스턴스를 로드하는 것이 좋습니다. 이렇게 하면 매번 올바른 아키텍처를 로드할 수 있습니다. 다음 [튜토리얼](preprocessing)에서는 새롭게 로드한 토크나이저, 이미지 프로세서, 특징 추출기를 사용하여 미세 튜닝용 데이터 세트를 전처리하는 방법에 대해 알아봅니다. </tf> </frameworkcontent>
transformers/docs/source/ko/autoclass_tutorial.md/0
{ "file_path": "transformers/docs/source/ko/autoclass_tutorial.md", "repo_id": "transformers", "token_count": 5250 }
276
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLaMA [[llama]] ## 개요 [[overview]] LLaMA 모델은 Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample에 의해 제안된 [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971)에서 소개되었습니다. 이 모델은 7B에서 65B개의 파라미터까지 다양한 크기의 기초 언어 모델을 모아놓은 것입니다. 논문의 초록은 다음과 같습니다: *"LLaMA는 7B에서 65B개의 파라미터 수를 가진 기초 언어 모델의 모음입니다. 우리는 수조 개의 토큰으로 모델을 훈련시켰고, 공개적으로 이용 가능한 데이터셋만을 사용하여 최고 수준의 모델을 훈련시킬 수 있음을 보여줍니다. 특히, LLaMA-13B 모델은 대부분의 벤치마크에서 GPT-3 (175B)를 능가하며, LLaMA-65B는 최고 수준의 모델인 Chinchilla-70B와 PaLM-540B에 버금가는 성능을 보입니다. 우리는 모든 모델을 연구 커뮤니티에 공개합니다."* 팁: - LLaMA 모델의 가중치는 [이 양식](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form)을 작성하여 얻을 수 있습니다. - 가중치를 다운로드한 후에는 이를 [변환 스크립트](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py)를 사용하여 Hugging Face Transformers 형식으로 변환해야합니다. 변환 스크립트를 실행하려면 아래의 예시 명령어를 참고하세요: ```bash python src/transformers/models/llama/convert_llama_weights_to_hf.py \ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path ``` - 변환을 하였다면 모델과 토크나이저는 다음과 같이 로드할 수 있습니다: ```python from transformers import LlamaForCausalLM, LlamaTokenizer tokenizer = LlamaTokenizer.from_pretrained("/output/path") model = LlamaForCausalLM.from_pretrained("/output/path") ``` 스크립트를 실행하기 위해서는 모델을 float16 정밀도로 전부 로드할 수 있을 만큼의 충분한 CPU RAM이 필요합니다. (가장 큰 버전의 모델이 여러 체크포인트로 나뉘어 있더라도, 각 체크포인트는 모델의 각 가중치의 일부를 포함하고 있기 때문에 모든 체크포인트를 RAM에 로드해야 합니다) 65B 모델의 경우, 총 130GB의 RAM이 필요합니다. - LLaMA 토크나이저는 [sentencepiece](https://github.com/google/sentencepiece)를 기반으로 하는 BPE 모델입니다. sentencepiece의 특징 중 하나는 시퀀스를 디코딩할 때 첫 토큰이 단어의 시작이라면 (예를 들어 "Banana"), 토크나이저는 문자열 앞에 공백을 추가하지 않는다는 것입니다. 이 모델은 [BlackSamorez](https://huggingface.co/BlackSamorez)의 기여와 함께, [zphang](https://huggingface.co/zphang)에 의해 제공되었습니다. Hugging Face에서의 구현 코드는 GPT-NeoX를 기반으로 하며 [여기](https://github.com/EleutherAI/gpt-neox)에서 찾을 수 있고, 저자의 코드 원본은 [여기](https://github.com/facebookresearch/llama)에서 확인할 수 있습니다. 원래 LLaMA 모델을 기반으로 Meta AI에서 몇 가지 후속 작업을 발표했습니다: - **Llama2**: Llama2는 구조적인 몇 가지 수정(Grouped Query Attention)을 통해 개선된 버전이며, 2조 개의 토큰으로 사전 훈련이 되어 있습니다. Llama2에 대한 자세한 내용은 [이 문서](llama2)를 참고하세요. ## 리소스 [[resources]] LLaMA를 시작하는 데 도움이 될 Hugging Face 및 커뮤니티(🌎로 표시)의 공식 자료 목록입니다. 여기에 자료를 제출하고 싶다면 Pull Request를 올려주세요! 추가할 자료는 기존의 자료와 중복되지 않고 새로운 내용을 보여주는 것이 좋습니다. 
<PipelineTag pipeline="text-classification"/> - LLaMA 모델을 텍스트 분류 작업에 적용하기 위한 프롬프트 튜닝 방법에 대한 [노트북](https://colab.research.google.com/github/bigscience-workshop/petals/blob/main/examples/prompt-tuning-sst2.ipynb#scrollTo=f04ba4d2) 🌎 <PipelineTag pipeline="question-answering"/> - [Stack Exchange](https://stackexchange.com/)에서 질문에 답하는 LLaMA를 훈련하는 방법을 위한 [StackLLaMA: RLHF로 LLaMA를 훈련하는 실전 가이드](https://huggingface.co/blog/stackllama#stackllama-a-hands-on-guide-to-train-llama-with-rlhf) 🌎 ⚗️ 최적화 - 제한된 메모리를 가진 GPU에서 xturing 라이브러리를 사용하여 LLaMA 모델을 미세 조정하는 방법에 대한 [노트북](https://colab.research.google.com/drive/1SQUXq1AMZPSLD4mk3A3swUIc6Y2dclme?usp=sharing) 🌎 ⚡️ 추론 - 🤗 PEFT 라이브러리의 PeftModel을 사용하여 LLaMA 모델을 실행하는 방법에 대한 [노트북](https://colab.research.google.com/github/DominguesM/alpaca-lora-ptbr-7b/blob/main/notebooks/02%20-%20Evaluate.ipynb) 🌎 - LangChain을 사용하여 PEFT 어댑터 LLaMA 모델을 로드하는 방법에 대한 [노트북](https://colab.research.google.com/drive/1l2GiSSPbajVyp2Nk3CFT4t3uH6-5TiBe?usp=sharing) 🌎 🚀 배포 - 🤗 PEFT 라이브러리와 사용자 친화적인 UI로 LLaMA 모델을 미세 조정하는 방법에 대한 [노트북](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb#scrollTo=3PM_DilAZD8T) 🌎 - Amazon SageMaker에서 텍스트 생성을 위해 Open-LLaMA 모델을 배포하는 방법에 대한 [노트북](https://github.com/aws/amazon-sagemaker-examples/blob/main/introduction_to_amazon_algorithms/jumpstart-foundation-models/text-generation-open-llama.ipynb) 🌎 ## LlamaConfig [[llamaconfig]] [[autodoc]] LlamaConfig ## LlamaTokenizer [[llamatokenizer]] [[autodoc]] LlamaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LlamaTokenizerFast [[llamatokenizerfast]] [[autodoc]] LlamaTokenizerFast - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - update_post_processor - save_vocabulary ## LlamaModel [[llamamodel]] [[autodoc]] LlamaModel - forward ## LlamaForCausalLM [[llamaforcausallm]] [[autodoc]] LlamaForCausalLM - forward ## LlamaForSequenceClassification [[llamaforsequenceclassification]] [[autodoc]] LlamaForSequenceClassification - forward
transformers/docs/source/ko/model_doc/llama.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/llama.md", "repo_id": "transformers", "token_count": 4406 }
277
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # TensorFlow로 TPU에서 훈련하기[[training-on-tpu-with-tensorflow]] <Tip> 자세한 설명이 필요하지 않고 바로 TPU 샘플 코드를 시작하고 싶다면 [우리의 TPU 예제 노트북!](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb)을 확인하세요. </Tip> ### TPU가 무엇인가요?[[what-is-a-tpu]] TPU는 **텐서 처리 장치**입니다. Google에서 설계한 하드웨어로, GPU처럼 신경망 내에서 텐서 연산을 더욱 빠르게 처리하기 위해 사용됩니다. 네트워크 훈련과 추론 모두에 사용할 수 있습니다. 일반적으로 Google의 클라우드 서비스를 통해 이용할 수 있지만, Google Colab과 Kaggle Kernel을 통해 소규모 TPU를 무료로 직접 이용할 수도 있습니다. [🤗 Transformers의 모든 Tensorflow 모델은 Keras 모델](https://huggingface.co/blog/tensorflow-philosophy)이기 때문에, 이 문서에서 다루는 대부분의 메소드는 대체로 모든 Keras 모델을 위한 TPU 훈련에 적용할 수 있습니다! 하지만 Transformer와 데이터 세트의 HuggingFace 생태계(hug-o-system?)에 특화된 몇 가지 사항이 있으며, 해당 사항에 대해 설명할 때 반드시 언급하도록 하겠습니다. ### 어떤 종류의 TPU가 있나요?[[what-kinds-of-tpu-are-available]] 신규 사용자는 TPU의 범위와 다양한 이용 방법에 대해 매우 혼란스러워하는 경우가 많습니다. **TPU 노드**와 **TPU VM**의 차이점은 가장 먼저 이해해야 할 핵심적인 구분 사항입니다. **TPU 노드**를 사용한다면, 실제로는 원격 TPU를 간접적으로 이용하는 것입니다. 네트워크와 데이터 파이프라인을 초기화한 다음, 이를 원격 노드로 전달할 별도의 VM이 필요합니다. Google Colab에서 TPU를 사용하는 경우, **TPU 노드** 방식으로 이용하게 됩니다. TPU 노드를 사용하는 것은 이를 사용하지 않는 사용자에게 예기치 않은 현상이 발생하기도 합니다! 특히, TPU는 파이썬 코드를 실행하는 기기(machine)와 물리적으로 다른 시스템에 있기 때문에 로컬 기기에 데이터를 저장할 수 없습니다. 즉, 컴퓨터의 내부 저장소에서 가져오는 데이터 파이프라인은 절대 작동하지 않습니다! 로컬 기기에 데이터를 저장하는 대신에, 데이터 파이프라인이 원격 TPU 노드에서 실행 중일 때에도 데이터 파이프라인이 계속 이용할 수 있는 Google Cloud Storage에 데이터를 저장해야 합니다. <Tip> 메모리에 있는 모든 데이터를 `np.ndarray` 또는 `tf.Tensor`로 맞출 수 있다면, Google Cloud Storage에 업로드할 필요 없이, Colab 또는 TPU 노드를 사용해서 해당 데이터에 `fit()` 할 수 있습니다. </Tip> <Tip> **🤗특수한 Hugging Face 팁🤗:** TF 코드 예제에서 볼 수 있는 `Dataset.to_tf_dataset()` 메소드와 그 상위 래퍼(wrapper)인 `model.prepare_tf_dataset()`는 모두 TPU 노드에서 작동하지 않습니다. 그 이유는 `tf.data.Dataset`을 생성하더라도 “순수한” `tf.data` 파이프라인이 아니며 `tf.numpy_function` 또는 `Dataset.from_generator()`를 사용하여 기본 HuggingFace `Dataset`에서 데이터를 전송하기 때문입니다. 이 HuggingFace `Dataset`는 로컬 디스크에 있는 데이터로 지원되며 원격 TPU 노드가 읽을 수 없습니다. </Tip> TPU를 이용하는 두 번째 방법은 **TPU VM**을 사용하는 것입니다. TPU VM을 사용할 때, GPU VM에서 훈련하는 것과 같이 TPU가 장착된 기기에 직접 연결합니다. 특히 데이터 파이프라인과 관련하여, TPU VM은 대체로 작업하기 더 쉽습니다. 위의 모든 경고는 TPU VM에는 해당되지 않습니다! 이 문서는 의견이 포함된 문서이며, 저희의 의견이 여기에 있습니다: **가능하면 TPU 노드를 사용하지 마세요.** TPU 노드는 TPU VM보다 더 복잡하고 디버깅하기가 더 어렵습니다. 또한 향후에는 지원되지 않을 가능성이 높습니다. Google의 최신 TPU인 TPUv4는 TPU VM으로만 이용할 수 있으므로, TPU 노드는 점점 더 "구식" 이용 방법이 될 것으로 전망됩니다. 그러나 TPU 노드를 사용하는 Colab과 Kaggle Kernel에서만 무료 TPU 이용이 가능한 것으로 확인되어, 필요한 경우 이를 다루는 방법을 설명해 드리겠습니다! 이에 대한 자세한 설명이 담긴 코드 샘플은 [TPU 예제 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb)에서 확인하시기 바랍니다. ### 어떤 크기의 TPU를 사용할 수 있나요?[[what-sizes-of-tpu-are-available]] 단일 TPU(v2-8/v3-8/v4-8)는 8개의 복제본(replicas)을 실행합니다. TPU는 수백 또는 수천 개의 복제본을 동시에 실행할 수 있는 **pod**로 존재합니다. 단일 TPU를 하나 이상 사용하지만 전체 Pod보다 적게 사용하는 경우(예를 들면, v3-32), TPU 구성을 **pod 슬라이스**라고 합니다. Colab을 통해 무료 TPU에 이용하는 경우, 기본적으로 단일 v2-8 TPU를 제공받습니다. ### XLA에 대해 들어본 적이 있습니다. 
XLA란 무엇이고 TPU와 어떤 관련이 있나요?[[i-keep-hearing-about-this-xla-thing-whats-xla-and-how-does-it-relate-to-tpus]] XLA는 최적화 컴파일러로, TensorFlow와 JAX에서 모두 사용됩니다. JAX에서는 유일한 컴파일러이지만, TensorFlow에서는 선택 사항입니다(하지만 TPU에서는 필수입니다!). Keras 모델을 훈련할 때 이를 활성화하는 가장 쉬운 방법은 `jit_compile=True` 인수를 `model.compile()`에 전달하는 것입니다. 오류가 없고 성능이 양호하다면, TPU로 전환할 준비가 되었다는 좋은 신호입니다! TPU에서 디버깅하는 것은 대개 CPU/GPU보다 조금 더 어렵기 때문에, TPU에서 시도하기 전에 먼저 XLA로 CPU/GPU에서 코드를 실행하는 것을 권장합니다. 물론 오래 학습할 필요는 없습니다. 즉, 모델과 데이터 파이프라인이 예상대로 작동하는지 확인하기 위해 몇 단계만 거치면 됩니다. <Tip> XLA로 컴파일된 코드는 대체로 더 빠릅니다. 따라서 TPU에서 실행할 계획이 없더라도, `jit_compile=True`를 추가하면 성능이 향상될 수 있습니다. 하지만 XLA 호환성에 대한 아래 주의 사항을 반드시 확인하세요! </Tip> <Tip warning={true}> **뼈아픈 경험에서 얻은 팁:** `jit_compile=True`를 사용하면 속도를 높이고 CPU/GPU 코드가 XLA와 호환되는지 검증할 수 있는 좋은 방법이지만, 실제 TPU에서 훈련할 때 그대로 남겨두면 많은 문제를 초래할 수 있습니다. XLA 컴파일은 TPU에서 암시적으로 이뤄지므로, 실제 TPU에서 코드를 실행하기 전에 해당 줄을 제거하는 것을 잊지 마세요! </Tip> ### 제 XLA 모델과 호환하려면 어떻게 해야 하나요?[[how-do-i-make-my-model-xla-compatible]] 대부분의 경우, 여러분의 코드는 이미 XLA와 호환될 것입니다! 그러나 표준 TensorFlow에서 작동하지만, XLA에서는 작동하지 않는 몇 가지 사항이 있습니다. 이를 아래 세 가지 핵심 규칙으로 간추렸습니다: <Tip> **특수한 HuggingFace 팁🤗:** 저희는 TensorFlow 모델과 손실 함수를 XLA와 호환되도록 재작성하는 데 많은 노력을 기울였습니다. 저희의 모델과 손실 함수는 대개 기본적으로 규칙 #1과 #2를 따르므로 `transformers` 모델을 사용하는 경우, 이를 건너뛸 수 있습니다. 하지만 자체 모델과 손실 함수를 작성할 때는 이러한 규칙을 잊지 마세요! </Tip> #### XLA 규칙 #1: 코드에서 “데이터 종속 조건문”을 사용할 수 없습니다[[xla-rule-1-your-code-cannot-have-datadependent-conditionals]] 어떤 `if`문도 `tf.Tensor` 내부의 값에 종속될 수 없다는 것을 의미합니다. 예를 들어, 이 코드 블록은 XLA로 컴파일할 수 없습니다! ```python if tf.reduce_sum(tensor) > 10: tensor = tensor / 2.0 ``` 처음에는 매우 제한적으로 보일 수 있지만, 대부분의 신경망 코드에서는 이를 수행할 필요가 없습니다. `tf.cond`를 사용하거나([여기](https://www.tensorflow.org/api_docs/python/tf/cond) 문서를 참조), 다음과 같이 조건문을 제거하고 대신 지표 변수를 사용하는 영리한 수학 트릭을 찾아내어 이 제한을 우회할 수 있습니다: ```python sum_over_10 = tf.cast(tf.reduce_sum(tensor) > 10, tf.float32) tensor = tensor / (1.0 + sum_over_10) ``` 이 코드는 위의 코드와 정확히 동일한 효과를 구현하지만, 조건문을 제거하여 문제 없이 XLA로 컴파일되도록 합니다! #### XLA 규칙 #2: 코드에서 "데이터 종속 크기"를 가질 수 없습니다[[xla-rule-2-your-code-cannot-have-datadependent-shapes]] 코드에서 모든 `tf.Tensor` 객체의 크기가 해당 값에 종속될 수 없다는 것을 의미합니다. 예를 들어, `tf.unique` 함수는 입력에서 각 고유 값의 인스턴스 하나를 포함하는 `tensor`를 반환하기 때문에 XLA로 컴파일할 수 없습니다. 이 출력의 크기는 입력 `Tensor`가 얼마나 반복적인지에 따라 분명히 달라질 것이므로, XLA는 이를 처리하지 못합니다! 일반적으로, 대부분의 신경망 코드는 기본값으로 규칙 2를 따릅니다. 그러나 문제가 되는 몇 가지 대표적인 사례가 있습니다. 가장 흔한 사례 중 하나는 **레이블 마스킹**을 사용하여 손실(loss)을 계산할 때, 해당 위치를 무시하도록 나타내기 위해 레이블을 음수 값으로 설정하는 경우입니다. 레이블 마스킹을 지원하는 NumPy나 PyTorch 손실 함수를 보면 [불 인덱싱](https://numpy.org/doc/stable/user/basics.indexing.html#boolean-array-indexing)을 사용하는 다음과 같은 코드를 자주 접할 수 있습니다: ```python label_mask = labels >= 0 masked_outputs = outputs[label_mask] masked_labels = labels[label_mask] loss = compute_loss(masked_outputs, masked_labels) mean_loss = torch.mean(loss) ``` 이 코드는 NumPy나 PyTorch에서는 문제 없이 작동하지만, XLA에서는 손상됩니다! 왜 그럴까요? 얼마나 많은 위치가 마스킹되는지에 따라 `masked_outputs`와 `masked_labels`의 크기가 달라져서, **데이터 종속 크기**가 되기 때문입니다. 그러나 규칙 #1과 마찬가지로, 이 코드를 다시 작성하면 데이터 종속적 모양 크기가 정확히 동일한 출력을 산출할 수 있습니다. ```python label_mask = tf.cast(labels >= 0, tf.float32) loss = compute_loss(outputs, labels) loss = loss * label_mask # Set negative label positions to 0 mean_loss = tf.reduce_sum(loss) / tf.reduce_sum(label_mask) ``` 여기서, 모든 위치에 대한 손실을 계산하지만, 평균을 계산할 때 분자와 분모 모두에서 마스크된 위치를 0으로 처리합니다. 이는 데이터 종속 크기를 방지하고 XLA 호환성을 유지하면서 첫 번째 블록과 정확히 동일한 결과를 산출합니다. 규칙 #1에서와 동일한 트릭을 사용하여 `tf.bool`을 `tf.float32`로 변환하고 이를 지표 변수로 사용합니다. 해당 트릭은 매우 유용하며, 자체 코드를 XLA로 변환해야 할 경우 기억해 두세요! 
#### XLA 규칙 #3: XLA는 각기 다른 입력 크기가 나타날 때마다 모델을 다시 컴파일해야 합니다[[xla-rule-3-xla-will-need-to-recompile-your-model-for-every-different-input-shape-it-sees]] 이것은 가장 큰 문제입니다. 입력 크기가 매우 가변적인 경우, XLA는 모델을 반복해서 다시 컴파일해야 하므로 성능에 큰 문제가 발생할 수 있습니다. 이 문제는 토큰화 후 입력 텍스트의 길이가 가변적인 NLP 모델에서 주로 발생합니다. 다른 모달리티에서는 정적 크기가 더 흔하며, 해당 규칙이 훨씬 덜 문제시 됩니다. 규칙 #3을 어떻게 우회할 수 있을까요? 핵심은 **패딩**입니다. 모든 입력을 동일한 길이로 패딩한 다음, `attention_mask`를 사용하면 어떤 XLA 문제도 없이 가변 크기에서 가져온 것과 동일한 결과를 가져올 수 있습니다. 그러나 과도한 패딩은 심각한 속도 저하를 야기할 수도 있습니다. 모든 샘플을 전체 데이터 세트의 최대 길이로 패딩하면, 무한한 패딩 토큰으로 구성된 배치가 생성되어 많은 연산과 메모리가 낭비될 수 있습니다! 이 문제에 대한 완벽한 해결책은 없습니다. 하지만, 몇 가지 트릭을 시도해볼 수 있습니다. 한 가지 유용한 트릭은 **샘플 배치를 32 또는 64 토큰과 같은 숫자의 배수까지 패딩하는 것입니다.** 이는 토큰 수가 소폭 증가하지만, 모든 입력 크기가 32 또는 64의 배수여야 하기 때문에 고유한 입력 크기의 수가 대폭 줄어듭니다. 고유한 입력 크기가 적다는 것은 XLA 컴파일 횟수가 적어진다는 것을 의미합니다! <Tip> **🤗특수한 HuggingFace 팁🤗:** 토크나이저와 데이터 콜레이터에 도움이 될 수 있는 메소드가 있습니다. 토크나이저를 불러올 때 `padding="max_length"` 또는 `padding="longest"`를 사용하여 패딩된 데이터를 출력하도록 할 수 있습니다. 토크나이저와 데이터 콜레이터는 나타나는 고유한 입력 크기의 수를 줄이기 위해 사용할 수 있는 `pad_to_multiple_of` 인수도 있습니다! </Tip> ### 실제 TPU로 모델을 훈련하려면 어떻게 해야 하나요?[[how-do-i-actually-train-my-model-on-tpu]] 훈련이 XLA와 호환되고 (TPU 노드/Colab을 사용하는 경우) 데이터 세트가 적절하게 준비되었다면, TPU에서 실행하는 것은 놀랍도록 쉽습니다! 코드에서 몇 줄만 추가하여, TPU를 초기화하고 모델과 데이터 세트가 `TPUStrategy` 범위 내에 생성되도록 변경하면 됩니다. [우리의 TPU 예제 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb)을 참조하여 실제로 작동하는 모습을 확인해 보세요! ### 요약[[summary]] 여기에 많은 내용이 포함되어 있으므로, TPU 훈련을 위한 모델을 준비할 때 따를 수 있는 간략한 체크리스트로 요약해 보겠습니다: - 코드가 XLA의 세 가지 규칙을 따르는지 확인합니다. - CPU/GPU에서 `jit_compile=True`로 모델을 컴파일하고 XLA로 훈련할 수 있는지 확인합니다. - 데이터 세트를 메모리에 가져오거나 TPU 호환 데이터 세트를 가져오는 방식을 사용합니다([노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb) 참조) - 코드를 Colab(accelerator가 “TPU”로 설정됨) 또는 Google Cloud의 TPU VM으로 마이그레이션합니다. - TPU 초기화 코드를 추가합니다([노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb) 참조) - `TPUStrategy`를 생성하고 데이터 세트를 가져오는 것과 모델 생성이 `strategy.scope()` 내에 있는지 확인합니다([노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb) 참조) - TPU로 이동할 때 `jit_compile=True`를 다시 설정하는 것을 잊지 마세요! - 🙏🙏🙏🥺🥺🥺 - model.fit()을 불러옵니다. - 여러분이 해냈습니다!
transformers/docs/source/ko/perf_train_tpu_tf.md/0
{ "file_path": "transformers/docs/source/ko/perf_train_tpu_tf.md", "repo_id": "transformers", "token_count": 12239 }
278
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 이미지 캡셔닝[[image-captioning]] [[open-in-colab]] 이미지 캡셔닝(Image captioning)은 주어진 이미지에 대한 캡션을 예측하는 작업입니다. 이미지 캡셔닝은 시각 장애인이 다양한 상황을 탐색하는 데 도움을 줄 수 있도록 시각 장애인을 보조하는 등 실생활에서 흔히 활용됩니다. 따라서 이미지 캡셔닝은 이미지를 설명함으로써 사람들의 콘텐츠 접근성을 개선하는 데 도움이 됩니다. 이 가이드에서는 소개할 내용은 아래와 같습니다: * 이미지 캡셔닝 모델을 파인튜닝합니다. * 파인튜닝된 모델을 추론에 사용합니다. 시작하기 전에 필요한 모든 라이브러리가 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate -q pip install jiwer -q ``` Hugging Face 계정에 로그인하면 모델을 업로드하고 커뮤니티에 공유할 수 있습니다. 토큰을 입력하여 로그인하세요. ```python from huggingface_hub import notebook_login notebook_login() ``` ## 포켓몬 BLIP 캡션 데이터세트 가져오기[[load-the-pokmon-blip-captions-dataset]] {이미지-캡션} 쌍으로 구성된 데이터세트를 가져오려면 🤗 Dataset 라이브러리를 사용합니다. PyTorch에서 자신만의 이미지 캡션 데이터세트를 만들려면 [이 노트북](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb)을 참조하세요. ```python from datasets import load_dataset ds = load_dataset("lambdalabs/pokemon-blip-captions") ds ``` ```bash DatasetDict({ train: Dataset({ features: ['image', 'text'], num_rows: 833 }) }) ``` 이 데이터세트는 `image`와 `text`라는 두 특성을 가지고 있습니다. <Tip> 많은 이미지 캡션 데이터세트에는 이미지당 여러 개의 캡션이 포함되어 있습니다. 이러한 경우, 일반적으로 학습 중에 사용 가능한 캡션 중에서 무작위로 샘플을 추출합니다. </Tip> [`~datasets.Dataset.train_test_split`] 메소드를 사용하여 데이터세트의 학습 분할을 학습 및 테스트 세트로 나눕니다: ```python ds = ds["train"].train_test_split(test_size=0.1) train_ds = ds["train"] test_ds = ds["test"] ``` 학습 세트의 샘플 몇 개를 시각화해 봅시다. Let's visualize a couple of samples from the training set. ```python from textwrap import wrap import matplotlib.pyplot as plt import numpy as np def plot_images(images, captions): plt.figure(figsize=(20, 20)) for i in range(len(images)): ax = plt.subplot(1, len(images), i + 1) caption = captions[i] caption = "\n".join(wrap(caption, 12)) plt.title(caption) plt.imshow(images[i]) plt.axis("off") sample_images_to_visualize = [np.array(train_ds[i]["image"]) for i in range(5)] sample_captions = [train_ds[i]["text"] for i in range(5)] plot_images(sample_images_to_visualize, sample_captions) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/sample_training_images_image_cap.png" alt="Sample training images"/> </div> ## 데이터세트 전처리[[preprocess-the-dataset]] 데이터세트에는 이미지와 텍스트라는 두 가지 양식이 있기 때문에, 전처리 파이프라인에서 이미지와 캡션을 모두 전처리합니다. 전처리 작업을 위해, 파인튜닝하려는 모델에 연결된 프로세서 클래스를 가져옵니다. ```python from transformers import AutoProcessor checkpoint = "microsoft/git-base" processor = AutoProcessor.from_pretrained(checkpoint) ``` 프로세서는 내부적으로 크기 조정 및 픽셀 크기 조정을 포함한 이미지 전처리를 수행하고 캡션을 토큰화합니다. 
```python def transforms(example_batch): images = [x for x in example_batch["image"]] captions = [x for x in example_batch["text"]] inputs = processor(images=images, text=captions, padding="max_length") inputs.update({"labels": inputs["input_ids"]}) return inputs train_ds.set_transform(transforms) test_ds.set_transform(transforms) ``` 데이터세트가 준비되었으니 이제 파인튜닝을 위해 모델을 설정할 수 있습니다. ## 기본 모델 가져오기[[load-a-base-model]] ["microsoft/git-base"](https://huggingface.co/microsoft/git-base)를 [`AutoModelForCausalLM`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) 객체로 가져옵니다. ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(checkpoint) ``` ## 평가[[evaluate]] 이미지 캡션 모델은 일반적으로 [Rouge 점수](https://huggingface.co/spaces/evaluate-metric/rouge) 또는 [단어 오류율(Word Error Rate)](https://huggingface.co/spaces/evaluate-metric/wer)로 평가합니다. 이 가이드에서는 단어 오류율(WER)을 사용합니다. 이를 위해 🤗 Evaluate 라이브러리를 사용합니다. WER의 잠재적 제한 사항 및 기타 문제점은 [이 가이드](https://huggingface.co/spaces/evaluate-metric/wer)를 참조하세요. ```python from evaluate import load import torch wer = load("wer") def compute_metrics(eval_pred): logits, labels = eval_pred predicted = logits.argmax(-1) decoded_labels = processor.batch_decode(labels, skip_special_tokens=True) decoded_predictions = processor.batch_decode(predicted, skip_special_tokens=True) wer_score = wer.compute(predictions=decoded_predictions, references=decoded_labels) return {"wer_score": wer_score} ``` ## 학습![[train!]] 이제 모델 파인튜닝을 시작할 준비가 되었습니다. 이를 위해 🤗 [`Trainer`]를 사용합니다. 먼저, [`TrainingArguments`]를 사용하여 학습 인수를 정의합니다. ```python from transformers import TrainingArguments, Trainer model_name = checkpoint.split("/")[1] training_args = TrainingArguments( output_dir=f"{model_name}-pokemon", learning_rate=5e-5, num_train_epochs=50, fp16=True, per_device_train_batch_size=32, per_device_eval_batch_size=32, gradient_accumulation_steps=2, save_total_limit=3, evaluation_strategy="steps", eval_steps=50, save_strategy="steps", save_steps=50, logging_steps=50, remove_unused_columns=False, push_to_hub=True, label_names=["labels"], load_best_model_at_end=True, ) ``` 학습 인수를 데이터세트, 모델과 함께 🤗 Trainer에 전달합니다. ```python trainer = Trainer( model=model, args=training_args, train_dataset=train_ds, eval_dataset=test_ds, compute_metrics=compute_metrics, ) ``` 학습을 시작하려면 [`Trainer`] 객체에서 [`~Trainer.train`]을 호출하기만 하면 됩니다. ```python trainer.train() ``` 학습이 진행되면서 학습 손실이 원활하게 감소하는 것을 볼 수 있습니다. 학습이 완료되면 모든 사람이 모델을 사용할 수 있도록 [`~Trainer.push_to_hub`] 메소드를 사용하여 모델을 허브에 공유하세요: ```python trainer.push_to_hub() ``` ## 추론[[inference]] `test_ds`에서 샘플 이미지를 가져와 모델을 테스트합니다. ```python from PIL import Image import requests url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/test_image_image_cap.png" alt="Test image"/> </div> 모델에 사용할 이미지를 준비합니다. ```python device = "cuda" if torch.cuda.is_available() else "cpu" inputs = processor(images=image, return_tensors="pt").to(device) pixel_values = inputs.pixel_values ``` [`generate`]를 호출하고 예측을 디코딩합니다. ```python generated_ids = model.generate(pixel_values=pixel_values, max_length=50) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] print(generated_caption) ``` ```bash a drawing of a pink and blue pokemon ``` 파인튜닝된 모델이 꽤 괜찮은 캡션을 생성한 것 같습니다!
transformers/docs/source/ko/tasks/image_captioning.md/0
{ "file_path": "transformers/docs/source/ko/tasks/image_captioning.md", "repo_id": "transformers", "token_count": 5078 }
279
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 제로샷(zero-shot) 객체 탐지[[zeroshot-object-detection]] [[open-in-colab]] 일반적으로 [객체 탐지](object_detection)에 사용되는 모델을 학습하기 위해서는 레이블이 지정된 이미지 데이터 세트가 필요합니다. 그리고 학습 데이터에 존재하는 클래스(레이블)만 탐지할 수 있다는 한계점이 있습니다. 다른 방식을 사용하는 [OWL-ViT](../model_doc/owlvit) 모델로 제로샷 객체 탐지가 가능합니다. OWL-ViT는 개방형 어휘(open-vocabulary) 객체 탐지기입니다. 즉, 레이블이 지정된 데이터 세트에 미세 조정하지 않고 자유 텍스트 쿼리를 기반으로 이미지에서 객체를 탐지할 수 있습니다. OWL-ViT 모델은 멀티 모달 표현을 활용해 개방형 어휘 탐지(open-vocabulary detection)를 수행합니다. [CLIP](../model_doc/clip) 모델에 경량화(lightweight)된 객체 분류와 지역화(localization) 헤드를 결합합니다. 개방형 어휘 탐지는 CLIP의 텍스트 인코더로 free-text 쿼리를 임베딩하고, 객체 분류와 지역화 헤드의 입력으로 사용합니다. 이미지와 해당 텍스트 설명을 연결하면 ViT가 이미지 패치(image patches)를 입력으로 처리합니다. OWL-ViT 모델의 저자들은 CLIP 모델을 처음부터 학습(scratch learning)한 후에, bipartite matching loss를 사용하여 표준 객체 인식 데이터셋으로 OWL-ViT 모델을 미세 조정했습니다. 이 접근 방식을 사용하면 모델은 레이블이 지정된 데이터 세트에 대한 사전 학습 없이도 텍스트 설명을 기반으로 객체를 탐지할 수 있습니다. 이번 가이드에서는 OWL-ViT 모델의 사용법을 다룰 것입니다: - 텍스트 프롬프트 기반 객체 탐지 - 일괄 객체 탐지 - 이미지 가이드 객체 탐지 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install -q transformers ``` ## 제로샷(zero-shot) 객체 탐지 파이프라인[[zeroshot-object-detection-pipeline]] [`pipeline`]을 활용하면 가장 간단하게 OWL-ViT 모델을 추론해볼 수 있습니다. [Hugging Face Hub에 업로드된 체크포인트](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads)에서 제로샷(zero-shot) 객체 탐지용 파이프라인을 인스턴스화합니다: ```python >>> from transformers import pipeline >>> checkpoint = "google/owlvit-base-patch32" >>> detector = pipeline(model=checkpoint, task="zero-shot-object-detection") ``` 다음으로, 객체를 탐지하고 싶은 이미지를 선택하세요. 여기서는 [NASA](https://www.nasa.gov/multimedia/imagegallery/index.html) Great Images 데이터 세트의 일부인 우주비행사 에일린 콜린스(Eileen Collins) 사진을 사용하겠습니다. ```py >>> import skimage >>> import numpy as np >>> from PIL import Image >>> image = skimage.data.astronaut() >>> image = Image.fromarray(np.uint8(image)).convert("RGB") >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_1.png" alt="Astronaut Eileen Collins"/> </div> 이미지와 해당 이미지의 후보 레이블을 파이프라인으로 전달합니다. 여기서는 이미지를 직접 전달하지만, 컴퓨터에 저장된 이미지의 경로나 url로 전달할 수도 있습니다. candidate_labels는 이 예시처럼 간단한 단어일 수도 있고 좀 더 설명적인 단어일 수도 있습니다. 또한, 이미지를 검색(query)하려는 모든 항목에 대한 텍스트 설명도 전달합니다. ```py >>> predictions = detector( ... image, ... candidate_labels=["human face", "rocket", "nasa badge", "star-spangled banner"], ... 
) >>> predictions [{'score': 0.3571370542049408, 'label': 'human face', 'box': {'xmin': 180, 'ymin': 71, 'xmax': 271, 'ymax': 178}}, {'score': 0.28099656105041504, 'label': 'nasa badge', 'box': {'xmin': 129, 'ymin': 348, 'xmax': 206, 'ymax': 427}}, {'score': 0.2110239565372467, 'label': 'rocket', 'box': {'xmin': 350, 'ymin': -1, 'xmax': 468, 'ymax': 288}}, {'score': 0.13790413737297058, 'label': 'star-spangled banner', 'box': {'xmin': 1, 'ymin': 1, 'xmax': 105, 'ymax': 509}}, {'score': 0.11950037628412247, 'label': 'nasa badge', 'box': {'xmin': 277, 'ymin': 338, 'xmax': 327, 'ymax': 380}}, {'score': 0.10649408400058746, 'label': 'rocket', 'box': {'xmin': 358, 'ymin': 64, 'xmax': 424, 'ymax': 280}}] ``` 이제 예측값을 시각화해봅시다: ```py >>> from PIL import ImageDraw >>> draw = ImageDraw.Draw(image) >>> for prediction in predictions: ... box = prediction["box"] ... label = prediction["label"] ... score = prediction["score"] ... xmin, ymin, xmax, ymax = box.values() ... draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1) ... draw.text((xmin, ymin), f"{label}: {round(score,2)}", fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_2.png" alt="Visualized predictions on NASA image"/> </div> ## 텍스트 프롬프트 기반 객체 탐지[[textprompted-zeroshot-object-detection-by-hand]] 제로샷 객체 탐지 파이프라인 사용법에 대해 살펴보았으니, 이제 동일한 결과를 복제해보겠습니다. [Hugging Face Hub에 업로드된 체크포인트](https://huggingface.co/models?other=owlvit)에서 관련 모델과 프로세서를 가져오는 것으로 시작합니다. 여기서는 이전과 동일한 체크포인트를 사용하겠습니다: ```py >>> from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection >>> model = AutoModelForZeroShotObjectDetection.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) ``` 다른 이미지를 사용해 보겠습니다: ```py >>> import requests >>> url = "https://unsplash.com/photos/oj0zeY2Ltk4/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MTR8fHBpY25pY3xlbnwwfHx8fDE2Nzc0OTE1NDk&force=true&w=640" >>> im = Image.open(requests.get(url, stream=True).raw) >>> im ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_3.png" alt="Beach photo"/> </div> 프로세서를 사용해 모델의 입력을 준비합니다. 프로세서는 모델의 입력으로 사용하기 위해 이미지 크기를 변환하고 정규화하는 이미지 프로세서와 텍스트 입력을 처리하는 [`CLIPTokenizer`]로 구성됩니다. ```py >>> text_queries = ["hat", "book", "sunglasses", "camera"] >>> inputs = processor(text=text_queries, images=im, return_tensors="pt") ``` 모델에 입력을 전달하고 결과를 후처리 및 시각화합니다. 이미지 프로세서가 모델에 이미지를 입력하기 전에 이미지 크기를 조정했기 때문에, [`~OwlViTImageProcessor.post_process_object_detection`] 메소드를 사용해 예측값의 바운딩 박스(bounding box)가 원본 이미지의 좌표와 상대적으로 동일한지 확인해야 합니다. ```py >>> import torch >>> with torch.no_grad(): ... outputs = model(**inputs) ... target_sizes = torch.tensor([im.size[::-1]]) ... results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)[0] >>> draw = ImageDraw.Draw(im) >>> scores = results["scores"].tolist() >>> labels = results["labels"].tolist() >>> boxes = results["boxes"].tolist() >>> for box, score, label in zip(boxes, scores, labels): ... xmin, ymin, xmax, ymax = box ... draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1) ... 
draw.text((xmin, ymin), f"{text_queries[label]}: {round(score,2)}", fill="white") >>> im ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_4.png" alt="Beach photo with detected objects"/> </div> ## 일괄 처리[[batch-processing]] 여러 이미지와 텍스트 쿼리를 전달하여 여러 이미지에서 서로 다른(또는 동일한) 객체를 검색할 수 있습니다. 일괄 처리를 위해서 텍스트 쿼리는 이중 리스트로, 이미지는 PIL 이미지, PyTorch 텐서, 또는 NumPy 배열로 이루어진 리스트로 프로세서에 전달해야 합니다. ```py >>> images = [image, im] >>> text_queries = [ ... ["human face", "rocket", "nasa badge", "star-spangled banner"], ... ["hat", "book", "sunglasses", "camera"], ... ] >>> inputs = processor(text=text_queries, images=images, return_tensors="pt") ``` 이전에는 후처리를 위해 단일 이미지의 크기를 텐서로 전달했지만, 튜플을 전달할 수 있고, 여러 이미지를 처리하는 경우에는 튜플로 이루어진 리스트를 전달할 수도 있습니다. 아래 두 예제에 대한 예측을 생성하고, 두 번째 이미지(`image_idx = 1`)를 시각화해 보겠습니다. ```py >>> with torch.no_grad(): ... outputs = model(**inputs) ... target_sizes = [x.size[::-1] for x in images] ... results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes) >>> image_idx = 1 >>> draw = ImageDraw.Draw(images[image_idx]) >>> scores = results[image_idx]["scores"].tolist() >>> labels = results[image_idx]["labels"].tolist() >>> boxes = results[image_idx]["boxes"].tolist() >>> for box, score, label in zip(boxes, scores, labels): ... xmin, ymin, xmax, ymax = box ... draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1) ... draw.text((xmin, ymin), f"{text_queries[image_idx][label]}: {round(score,2)}", fill="white") >>> images[image_idx] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_4.png" alt="Beach photo with detected objects"/> </div> ## 이미지 가이드 객체 탐지[[imageguided-object-detection]] 텍스트 쿼리를 이용한 제로샷 객체 탐지 외에도 OWL-ViT 모델은 이미지 가이드 객체 탐지 기능을 제공합니다. 이미지를 쿼리로 사용해 대상 이미지에서 유사한 객체를 찾을 수 있다는 의미입니다. 텍스트 쿼리와 달리 하나의 예제 이미지에서만 가능합니다. 소파에 고양이 두 마리가 있는 이미지를 대상 이미지(target image)로, 고양이 한 마리가 있는 이미지를 쿼리로 사용해보겠습니다: ```py >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image_target = Image.open(requests.get(url, stream=True).raw) >>> query_url = "http://images.cocodataset.org/val2017/000000524280.jpg" >>> query_image = Image.open(requests.get(query_url, stream=True).raw) ``` 다음 이미지를 살펴보겠습니다: ```py >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 2) >>> ax[0].imshow(image_target) >>> ax[1].imshow(query_image) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_5.png" alt="Cats"/> </div> 전처리 단계에서 텍스트 쿼리 대신에 `query_images`를 사용합니다: ```py >>> inputs = processor(images=image_target, query_images=query_image, return_tensors="pt") ``` 예측의 경우, 모델에 입력을 전달하는 대신 [`~OwlViTForObjectDetection.image_guided_detection`]에 전달합니다. 레이블이 없다는 점을 제외하면 이전과 동일합니다. 이전과 동일하게 이미지를 시각화합니다. ```py >>> with torch.no_grad(): ... outputs = model.image_guided_detection(**inputs) ... target_sizes = torch.tensor([image_target.size[::-1]]) ... results = processor.post_process_image_guided_detection(outputs=outputs, target_sizes=target_sizes)[0] >>> draw = ImageDraw.Draw(image_target) >>> scores = results["scores"].tolist() >>> boxes = results["boxes"].tolist() >>> for box, score, label in zip(boxes, scores, labels): ... xmin, ymin, xmax, ymax = box ... 
draw.rectangle((xmin, ymin, xmax, ymax), outline="white", width=4) >>> image_target ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_6.png" alt="Cats with bounding boxes"/> </div> OWL-ViT 모델을 추론하고 싶다면 아래 데모를 확인하세요: <iframe src="https://adirik-owl-vit.hf.space" frameborder="0" width="850" height="450" ></iframe>
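
The text-prompted workflow above can also be collected into a single reusable helper. The snippet below is only a minimal sketch: the checkpoint name and the `detect_and_draw` function are illustrative assumptions rather than part of the guide's API.

```py
import torch
from PIL import ImageDraw
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

# Assumed checkpoint -- replace with the checkpoint used earlier in this guide.
checkpoint = "google/owlvit-base-patch32"
processor = AutoProcessor.from_pretrained(checkpoint)
model = AutoModelForZeroShotObjectDetection.from_pretrained(checkpoint)


def detect_and_draw(image, text_queries, threshold=0.1):
    """Run text-prompted detection and draw the predicted boxes on a copy of the image."""
    image = image.copy()
    inputs = processor(text=text_queries, images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Rescale the predicted boxes back to the original image size.
    target_sizes = torch.tensor([image.size[::-1]])
    results = processor.post_process_object_detection(outputs, threshold=threshold, target_sizes=target_sizes)[0]
    draw = ImageDraw.Draw(image)
    for box, score, label in zip(results["boxes"].tolist(), results["scores"].tolist(), results["labels"].tolist()):
        xmin, ymin, xmax, ymax = box
        draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1)
        draw.text((xmin, ymin), f"{text_queries[label]}: {round(score, 2)}", fill="white")
    return image
```

For example, `detect_and_draw(im, ["hat", "book", "sunglasses", "camera"])` should return an annotated image similar to the one shown earlier.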
transformers/docs/source/ko/tasks/zero_shot_object_detection.md/0
{ "file_path": "transformers/docs/source/ko/tasks/zero_shot_object_detection.md", "repo_id": "transformers", "token_count": 7704 }
280
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Criar uma arquitetura customizada Uma [`AutoClass`](model_doc/auto) automaticamente infere a arquitetura do modelo e baixa configurações e pesos pré-treinados. Geralmente, nós recomendamos usar uma `AutoClass` para produzir um código independente de checkpoints. Mas usuários que querem mais contole sobre parâmetros específicos do modelo pode criar um modelo customizado 🤗 Transformers a partir de algumas classes bases. Isso pode ser particulamente útil para alguém que está interessado em estudar, treinar ou fazer experimentos com um modelo 🤗 Transformers. Nesse tutorial, será explicado como criar um modelo customizado sem uma `AutoClass`. Aprenda como: - Carregar e customizar a configuração de um modelo. - Criar a arquitetura de um modelo. - Criar um tokenizer rápido e devagar para textos. - Criar extrator de features para tarefas envolvendo audio e imagem. - Criar um processador para tarefas multimodais. ## configuration A [configuration](main_classes/configuration) refere-se a atributos específicos de um modelo. Cada configuração de modelo tem atributos diferentes; por exemplo, todos modelo de PLN possuem os atributos `hidden_size`, `num_attention_heads`, `num_hidden_layers` e `vocab_size` em comum. Esse atributos especificam o numero de 'attention heads' ou 'hidden layers' para construir um modelo. Dê uma olhada a mais em [DistilBERT](model_doc/distilbert) acessando [`DistilBertConfig`] para observar esses atributos: ```py >>> from transformers import DistilBertConfig >>> config = DistilBertConfig() >>> print(config) DistilBertConfig { "activation": "gelu", "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` [`DistilBertConfig`] mostra todos os atributos padrões usados para construir um [`DistilBertModel`] base. Todos atributos são customizáveis, o que cria espaço para experimentos. Por exemplo, você pode customizar um modelo padrão para: - Tentar uma função de ativação diferente com o parâmetro `activation`. - Usar uma taxa de desistência maior para as probabilidades de 'attention' com o parâmetro `attention_dropout`. 
```py >>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) >>> print(my_config) DistilBertConfig { "activation": "relu", "attention_dropout": 0.4, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` Atributos de um modelo pré-treinado podem ser modificados na função [`~PretrainedConfig.from_pretrained`]: ```py >>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4) ``` Uma vez que você está satisfeito com as configurações do seu modelo, você consegue salvar elas com [`~PretrainedConfig.save_pretrained`]. Seu arquivo de configurações está salvo como um arquivo JSON no diretório especificado: ```py >>> my_config.save_pretrained(save_directory="./your_model_save_path") ``` Para reusar o arquivo de configurações, carregue com [`~PretrainedConfig.from_pretrained`]: ```py >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") ``` <Tip> Você pode também salvar seu arquivo de configurações como um dicionário ou até mesmo com a diferença entre as seus atributos de configuração customizados e os atributos de configuração padrões! Olhe a documentação [configuration](main_classes/configuration) para mais detalhes. </Tip> ## Modelo O próximo passo é criar um [model](main_classes/models). O modelo - também vagamente referido como arquitetura - define o que cada camada está fazendo e quais operações estão acontecendo. Atributos como `num_hidden_layers` das configurações são utilizados para definir a arquitetura. Todo modelo compartilha a classe base [`PreTrainedModel`] e alguns métodos em comum como redimensionar o tamanho dos embeddings de entrada e podar as 'self-attention heads'. Além disso, todos os modelos também são subclasses de [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) ou [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html). Isso significa que os modelos são compatíveis com cada respectivo uso de framework. <frameworkcontent> <pt> Carregar seus atributos de configuração customizados em um modelo: ```py >>> from transformers import DistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> model = DistilBertModel(my_config) ``` Isso cria um modelo com valores aleatórios ao invés de pré-treinar os pesos. Você não irá conseguir usar usar esse modelo para nada útil ainda, até você treinar ele. Treino é um processo caro e demorado. Geralmente é melhor utilizar um modelo pré-treinado para obter melhores resultados mais rápido, enquanto usa apenas uma fração dos recursos necessários para treinar. Criar um modelo pré-treinado com [`~PreTrainedModel.from_pretrained`]: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` Quando você carregar os pesos pré-treinados, a configuração padrão do modelo é automaticamente carregada se o modelo é provido pelo 🤗 Transformers. 
No entanto, você ainda consegue mudar - alguns ou todos - os atributos padrões de configuração do modelo com os seus próprio atributos, se você preferir: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </pt> <tf> Carregar os seus próprios atributos padrões de contiguração no modelo: ```py >>> from transformers import TFDistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> tf_model = TFDistilBertModel(my_config) ``` Isso cria um modelo com valores aleatórios ao invés de pré-treinar os pesos. Você não irá conseguir usar usar esse modelo para nada útil ainda, até você treinar ele. Treino é um processo caro e demorado. Geralmente é melhor utilizar um modelo pré-treinado para obter melhores resultados mais rápido, enquanto usa apenas uma fração dos recursos necessários para treinar. Criar um modelo pré-treinado com [`~TFPreTrainedModel.from_pretrained`]: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` Quando você carregar os pesos pré-treinados, a configuração padrão do modelo é automaticamente carregada se o modelo é provido pelo 🤗 Transformers. No entanto, você ainda consegue mudar - alguns ou todos - os atributos padrões de configuração do modelo com os seus próprio atributos, se você preferir: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </tf> </frameworkcontent> ### Heads do modelo Neste ponto, você tem um modelo básico do DistilBERT que gera os *estados ocultos*. Os estados ocultos são passados como entrada para a head do moelo para produzir a saída final. 🤗 Transformers fornece uma head de modelo diferente para cada tarefa desde que o modelo suporte essa tarefa (por exemplo, você não consegue utilizar o modelo DistilBERT para uma tarefa de 'sequence-to-sequence' como tradução). <frameworkcontent> <pt> Por exemplo, [`DistilBertForSequenceClassification`] é um modelo DistilBERT base com uma head de classificação de sequência. A head de calssificação de sequência é uma camada linear no topo das saídas agrupadas. ```py >>> from transformers import DistilBertForSequenceClassification >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder questões, você usaria a head do modelo [`DistilBertForQuestionAnswering`]. A head de responder questões é similar com a de classificação de sequências exceto o fato de que ela é uma camada no topo dos estados das saídas ocultas. ```py >>> from transformers import DistilBertForQuestionAnswering >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </pt> <tf> Por exemplo, [`TFDistilBertForSequenceClassification`] é um modelo DistilBERT base com uma head de classificação de sequência. A head de calssificação de sequência é uma camada linear no topo das saídas agrupadas. ```py >>> from transformers import TFDistilBertForSequenceClassification >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` Reutilize facilmente esse ponto de parada para outra tarefe mudando para uma head de modelo diferente. Para uma tarefe de responder questões, você usaria a head do modelo [`TFDistilBertForQuestionAnswering`]. 
A head de responder questões é similar com a de classificação de sequências exceto o fato de que ela é uma camada no topo dos estados das saídas ocultas. ```py >>> from transformers import TFDistilBertForQuestionAnswering >>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </tf> </frameworkcontent> ## Tokenizer A útlima classe base que você precisa antes de usar um modelo para dados textuais é a [tokenizer](main_classes/tokenizer) para converter textos originais para tensores. Existem dois tipos de tokenizers que você pode usar com 🤗 Transformers: - [`PreTrainedTokenizer`]: uma implementação em Python de um tokenizer. - [`PreTrainedTokenizerFast`]: um tokenizer da nossa biblioteca [🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/) baseada em Rust. Esse tipo de tokenizer é significantemente mais rapido - especialmente durante tokenization de codificação - devido a implementação em Rust. O tokenizer rápido tambem oferece métodos adicionais como *offset mapping* que mapeia tokens para suar palavras ou caracteres originais. Os dois tokenizers suporta métodos comuns como os de codificar e decodificar, adicionar novos tokens, e gerenciar tokens especiais. <Tip warning={true}> Nem todo modelo suporta um 'fast tokenizer'. De uma olhada aqui [table](index#supported-frameworks) pra checar se um modelo suporta 'fast tokenizer'. </Tip> Se você treinou seu prórpio tokenizer, você pode criar um a partir do seu arquivo *vocabulary*: ```py >>> from transformers import DistilBertTokenizer >>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left") ``` É importante lembrar que o vocabulário de um tokenizer customizado será diferente de um vocabulário gerado pelo tokenizer de um modelo pré treinado. Você precisa usar o vocabulário de um modelo pré treinado se você estiver usando um modelo pré treinado, caso contrário as entradas não farão sentido. Criando um tokenizer com um vocabulário de um modelo pré treinado com a classe [`DistilBertTokenizer`]: ```py >>> from transformers import DistilBertTokenizer >>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` Criando um 'fast tokenizer' com a classe [`DistilBertTokenizerFast`]: ```py >>> from transformers import DistilBertTokenizerFast >>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> Pos padrão, [`AutoTokenizer`] tentará carregar um 'fast tokenizer'. Você pode disabilitar esse comportamento colocando `use_fast=False` no `from_pretrained`. </Tip> ## Extrator de features Um extrator de features processa entradas de imagem ou áudio. Ele herda da classe base [`~feature_extraction_utils.FeatureExtractionMixin`], e pode também herdar da classe [`ImageFeatureExtractionMixin`] para processamento de features de imagem ou da classe [`SequenceFeatureExtractor`] para processamento de entradas de áudio. Dependendo do que você está trabalhando em um audio ou uma tarefa de visão, crie um estrator de features associado com o modelo que você está usando. 
Por exemplo, crie um [`ViTFeatureExtractor`] padrão se você estiver usando [ViT](model_doc/vit) para classificação de imagens: ```py >>> from transformers import ViTFeatureExtractor >>> vit_extractor = ViTFeatureExtractor() >>> print(vit_extractor) ViTFeatureExtractor { "do_normalize": true, "do_resize": true, "feature_extractor_type": "ViTFeatureExtractor", "image_mean": [ 0.5, 0.5, 0.5 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "size": 224 } ``` <Tip> Se você não estiver procurando por nenhuma customização, apenas use o método `from_pretrained` para carregar parâmetros do modelo de extrator de features padrão. </Tip> Modifique qualquer parâmetro dentre os [`ViTFeatureExtractor`] para criar seu extrator de features customizado. ```py >>> from transformers import ViTFeatureExtractor >>> my_vit_extractor = ViTFeatureExtractor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) >>> print(my_vit_extractor) ViTFeatureExtractor { "do_normalize": false, "do_resize": true, "feature_extractor_type": "ViTFeatureExtractor", "image_mean": [ 0.3, 0.3, 0.3 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": "PIL.Image.BOX", "size": 224 } ``` Para entradas de áutio, você pode criar um [`Wav2Vec2FeatureExtractor`] e customizar os parâmetros de uma forma similar: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor() >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 16000 } ``` ## Processor Para modelos que suportam tarefas multimodais, 🤗 Transformers oferece uma classe processadora que convenientemente cobre um extrator de features e tokenizer dentro de um único objeto. Por exemplo, vamos usar o [`Wav2Vec2Processor`] para uma tarefa de reconhecimento de fala automática (ASR). ASR transcreve áudio para texto, então você irá precisar de um extrator de um features e um tokenizer. Crie um extrator de features para lidar com as entradas de áudio. ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True) ``` Crie um tokenizer para lidar com a entrada de textos: ```py >>> from transformers import Wav2Vec2CTCTokenizer >>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt") ``` Combine o extrator de features e o tokenizer no [`Wav2Vec2Processor`]: ```py >>> from transformers import Wav2Vec2Processor >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` Com duas classes básicas - configuração e modelo - e um preprocessamento de classe adicional (tokenizer, extrator de features, ou processador), você pode criar qualquer modelo que suportado por 🤗 Transformers. Qualquer uma dessas classes base são configuráveis, te permitindo usar os atributos específicos que você queira. Você pode facilmente preparar um modelo para treinamento ou modificar um modelo pré-treinado com poucas mudanças.
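
To tie the pieces of this guide together, here is a short end-to-end sketch: a custom configuration, a randomly initialised model built from it, and a pretrained tokenizer. The hyperparameter values are illustrative assumptions, not recommended settings.

```py
import torch
from transformers import DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizerFast

# A smaller-than-default DistilBERT configuration (illustrative values).
my_config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=2048)

# Model built from the custom configuration: weights are randomly initialised.
model = DistilBertForSequenceClassification(my_config)

# Reuse the pretrained vocabulary so the token ids match the default vocab_size (30522).
tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased")

inputs = tokenizer("Um exemplo rápido.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.logits.shape)  # torch.Size([1, 2]) -- one sentence, two labels by default
```

Saving this configuration and model afterwards uses the same `save_pretrained` calls shown earlier in the guide.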
transformers/docs/source/pt/create_a_model.md/0
{ "file_path": "transformers/docs/source/pt/create_a_model.md", "repo_id": "transformers", "token_count": 6000 }
281
- sections:
  - local: index
    title: 🤗 Transformers
  title: Get started
transformers/docs/source/tr/_toctree.yml/0
{ "file_path": "transformers/docs/source/tr/_toctree.yml", "repo_id": "transformers", "token_count": 25 }
282
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 安装 为你正在使用的深度学习框架安装 🤗 Transformers、设置缓存,并选择性配置 🤗 Transformers 以离线运行。 🤗 Transformers 已在 Python 3.6+、PyTorch 1.1.0+、TensorFlow 2.0+ 以及 Flax 上进行测试。针对你使用的深度学习框架,请参照以下安装说明进行安装: * [PyTorch](https://pytorch.org/get-started/locally/) 安装说明。 * [TensorFlow 2.0](https://www.tensorflow.org/install/pip) 安装说明。 * [Flax](https://flax.readthedocs.io/en/latest/) 安装说明。 ## 使用 pip 安装 你应该使用 [虚拟环境](https://docs.python.org/3/library/venv.html) 安装 🤗 Transformers。如果你不熟悉 Python 虚拟环境,请查看此 [教程](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)。使用虚拟环境,你可以轻松管理不同项目,避免不同依赖项之间的兼容性问题。 首先,在项目目录中创建虚拟环境: ```bash python -m venv .env ``` 在 Linux 和 MacOs 系统中激活虚拟环境: ```bash source .env/bin/activate ``` 在 Windows 系统中激活虚拟环境: ```bash .env/Scripts/activate ``` 现在你可以使用以下命令安装 🤗 Transformers: ```bash pip install transformers ``` 若仅需 CPU 支持,可以使用单行命令方便地安装 🤗 Transformers 和深度学习库。例如,使用以下命令安装 🤗 Transformers 和 PyTorch: ```bash pip install 'transformers[torch]' ``` 🤗 Transformers 和 TensorFlow 2.0: ```bash pip install 'transformers[tf-cpu]' ``` <Tip warning={true}> M1 / ARM用户 在安装 TensorFlow 2.0 前,你需要安装以下库: ```bash brew install cmake brew install pkg-config ``` </Tip> 🤗 Transformers 和 Flax: ```bash pip install 'transformers[flax]' ``` 最后,运行以下命令以检查 🤗 Transformers 是否已被正确安装。该命令将下载一个预训练模型: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` 然后打印标签以及分数: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## 源码安装 使用以下命令从源码安装 🤗 Transformers: ```bash pip install git+https://github.com/huggingface/transformers ``` 此命令下载的是最新的前沿 `main` 版本而不是最新的 `stable` 版本。`main` 版本适用于跟最新开发保持一致。例如,上次正式版发布带来的 bug 被修复了,但新版本尚未被推出。但是,这也说明 `main` 版本并不一定总是稳定的。我们努力保持 `main` 版本的可操作性,大多数问题通常在几个小时或一天以内就能被解决。如果你遇到问题,请提个 [Issue](https://github.com/huggingface/transformers/issues) 以便我们能更快修复。 运行以下命令以检查 🤗 Transformers 是否已被正确安装: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## 可编辑安装 如果你有下列需求,需要进行可编辑安装: * 使用源码的 `main` 版本。 * 为 🤗 Transformers 贡献代码,需要测试代码中的更改。 使用以下命令克隆仓库并安装 🤗 Transformers: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . 
``` 这些命令将会链接你克隆的仓库以及你的 Python 库路径。现在,Python 不仅会在正常的库路径中搜索库,也会在你克隆到的文件夹中进行查找。例如,如果你的 Python 包通常本应安装在 `~/anaconda3/envs/main/lib/python3.7/site-packages/` 目录中,在这种情况下 Python 也会搜索你克隆到的文件夹:`~/transformers/`。 <Tip warning={true}> 如果你想继续使用这个库,必须保留 `transformers` 文件夹。 </Tip> 现在,你可以使用以下命令,将你克隆的 🤗 Transformers 库轻松更新至最新版本: ```bash cd ~/transformers/ git pull ``` 你的 Python 环境将在下次运行时找到 `main` 版本的 🤗 Transformers。 ## 使用 conda 安装 从 conda 的 `conda-forge` 频道安装: ```bash conda install conda-forge::transformers ``` ## 缓存设置 预训练模型会被下载并本地缓存到 `~/.cache/huggingface/hub`。这是由环境变量 `TRANSFORMERS_CACHE` 指定的默认目录。在 Windows 上,默认目录为 `C:\Users\username\.cache\huggingface\hub`。你可以按照不同优先级改变下述环境变量,以指定不同的缓存目录。 1. 环境变量(默认): `HUGGINGFACE_HUB_CACHE` 或 `TRANSFORMERS_CACHE`。 2. 环境变量 `HF_HOME`。 3. 环境变量 `XDG_CACHE_HOME` + `/huggingface`。 <Tip> 除非你明确指定了环境变量 `TRANSFORMERS_CACHE`,🤗 Transformers 将可能会使用较早版本设置的环境变量 `PYTORCH_TRANSFORMERS_CACHE` 或 `PYTORCH_PRETRAINED_BERT_CACHE`。 </Tip> ## 离线模式 🤗 Transformers 可以仅使用本地文件在防火墙或离线环境中运行。设置环境变量 `TRANSFORMERS_OFFLINE=1` 以启用该行为。 <Tip> 通过设置环境变量 `HF_DATASETS_OFFLINE=1` 将 [🤗 Datasets](https://huggingface.co/docs/datasets/) 添加至你的离线训练工作流程中。 </Tip> 例如,你通常会使用以下命令对外部实例进行防火墙保护的的普通网络上运行程序: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` 在离线环境中运行相同的程序: ```bash HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` 现在脚本可以应该正常运行,而无需挂起或等待超时,因为它知道只应查找本地文件。 ### 获取离线时使用的模型和分词器 另一种离线时使用 🤗 Transformers 的方法是预先下载好文件,然后在需要离线使用时指向它们的离线路径。有三种实现的方法: * 单击 [Model Hub](https://huggingface.co/models) 用户界面上的 ↓ 图标下载文件。 ![下载图标](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * 使用 [`PreTrainedModel.from_pretrained`] 和 [`PreTrainedModel.save_pretrained`] 工作流程: 1. 预先使用 [`PreTrainedModel.from_pretrained`] 下载文件: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. 使用 [`PreTrainedModel.save_pretrained`] 将文件保存至指定目录: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. 现在,你可以在离线时从指定目录使用 [`PreTrainedModel.from_pretrained`] 重新加载你的文件: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * 使用代码用 [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) 库下载文件: 1. 在你的虚拟环境中安装 `huggingface_hub` 库: ```bash python -m pip install huggingface_hub ``` 2. 使用 [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) 函数将文件下载到指定路径。例如,以下命令将 `config.json` 文件从 [T0](https://huggingface.co/bigscience/T0_3B) 模型下载至你想要的路径: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` 下载完文件并在本地缓存后,指定其本地路径以加载和使用该模型: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> 请参阅 [如何从 Hub 下载文件](https://huggingface.co/docs/hub/how-to-downstream) 部分,获取有关下载存储在 Hub 上文件的更多详细信息。 </Tip>
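
The offline workflow can also be scripted end to end: download once on a connected machine, then load strictly from the local path. The following is a minimal sketch that assumes the same `bigscience/T0_3B` repository and target path used above.

```py
from huggingface_hub import snapshot_download
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Step 1 (machine with internet access): fetch the whole repository once.
# snapshot_download returns the local folder that holds the downloaded files.
local_dir = snapshot_download(repo_id="bigscience/T0_3B", cache_dir="./your/path/bigscience_t0")

# Step 2 (firewalled machine): point from_pretrained at that local folder.
# Export TRANSFORMERS_OFFLINE=1 before starting Python to guarantee that no
# network request is attempted while loading.
tokenizer = AutoTokenizer.from_pretrained(local_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(local_dir)
```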
transformers/docs/source/zh/installation.md/0
{ "file_path": "transformers/docs/source/zh/installation.md", "repo_id": "transformers", "token_count": 4836 }
283
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 使用 🤗 PEFT 加载adapters [[open-in-colab]] [参数高效微调(PEFT)方法](https://huggingface.co/blog/peft)在微调过程中冻结预训练模型的参数,并在其顶部添加少量可训练参数(adapters)。adapters被训练以学习特定任务的信息。这种方法已被证明非常节省内存,同时具有较低的计算使用量,同时产生与完全微调模型相当的结果。 使用PEFT训练的adapters通常比完整模型小一个数量级,使其方便共享、存储和加载。 <div class="flex flex-col justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/> <figcaption class="text-center">与完整尺寸的模型权重(约为700MB)相比,存储在Hub上的OPTForCausalLM模型的adapter权重仅为~6MB。</figcaption> </div> 如果您对学习更多关于🤗 PEFT库感兴趣,请查看[文档](https://huggingface.co/docs/peft/index)。 ## 设置 首先安装 🤗 PEFT: ```bash pip install peft ``` 如果你想尝试全新的特性,你可能会有兴趣从源代码安装这个库: ```bash pip install git+https://github.com/huggingface/peft.git ``` ## 支持的 PEFT 模型 Transformers原生支持一些PEFT方法,这意味着你可以加载本地存储或在Hub上的adapter权重,并使用几行代码轻松运行或训练它们。以下是受支持的方法: - [Low Rank Adapters](https://huggingface.co/docs/peft/conceptual_guides/lora) - [IA3](https://huggingface.co/docs/peft/conceptual_guides/ia3) - [AdaLoRA](https://arxiv.org/abs/2303.10512) 如果你想使用其他PEFT方法,例如提示学习或提示微调,或者关于通用的 🤗 PEFT库,请参阅[文档](https://huggingface.co/docs/peft/index)。 ## 加载 PEFT adapter 要从huggingface的Transformers库中加载并使用PEFTadapter模型,请确保Hub仓库或本地目录包含一个`adapter_config.json`文件和adapter权重,如上例所示。然后,您可以使用`AutoModelFor`类加载PEFT adapter模型。例如,要为因果语言建模加载一个PEFT adapter模型: 1. 指定PEFT模型id 2. 
将其传递给[`AutoModelForCausalLM`]类

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "ybelkada/opt-350m-lora"
model = AutoModelForCausalLM.from_pretrained(peft_model_id)
```

<Tip>

你可以使用`AutoModelFor`类或基础模型类(如`OPTForCausalLM`或`LlamaForCausalLM`)来加载一个PEFT adapter。

</Tip>

您也可以通过`load_adapter`方法来加载 PEFT adapter。

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "facebook/opt-350m"
peft_model_id = "ybelkada/opt-350m-lora"

model = AutoModelForCausalLM.from_pretrained(model_id)
model.load_adapter(peft_model_id)
```

## 基于8bit或4bit进行加载

`bitsandbytes`集成支持8bit和4bit精度数据类型,这对于加载大模型非常有用,因为它可以节省内存(请参阅`bitsandbytes`[指南](./quantization#bitsandbytes-integration)以了解更多信息)。要有效地将模型分配到您的硬件,请在[`~PreTrainedModel.from_pretrained`]中添加`load_in_8bit`或`load_in_4bit`参数,并设置`device_map="auto"`:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "ybelkada/opt-350m-lora"
model = AutoModelForCausalLM.from_pretrained(peft_model_id, device_map="auto", load_in_8bit=True)
```

## 添加新的adapter

你可以使用[`~peft.PeftModel.add_adapter`]方法为一个已有adapter的模型添加一个新的adapter,只要新adapter的类型与当前adapter相同即可。例如,如果你有一个附加到模型上的LoRA adapter:

```py
from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer
from peft import LoraConfig

model_id = "facebook/opt-350m"
model = AutoModelForCausalLM.from_pretrained(model_id)

lora_config = LoraConfig(
    target_modules=["q_proj", "k_proj"],
    init_lora_weights=False
)

model.add_adapter(lora_config, adapter_name="adapter_1")
```

添加一个新的adapter:

```py
# attach new adapter with same config
model.add_adapter(lora_config, adapter_name="adapter_2")
```

现在您可以使用[`~peft.PeftModel.set_adapter`]来设置要使用的adapter。

```py
# use adapter_1
model.set_adapter("adapter_1")
output = model.generate(**inputs)
print(tokenizer.decode(output[0], skip_special_tokens=True))

# use adapter_2
model.set_adapter("adapter_2")
output = model.generate(**inputs)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

## 启用和禁用adapters

一旦您将adapter添加到模型中,您可以启用或禁用adapter模块。要启用adapter模块:

```py
from transformers import AutoModelForCausalLM, OPTForCausalLM, AutoTokenizer
from peft import PeftConfig

model_id = "facebook/opt-350m"
adapter_model_id = "ybelkada/opt-350m-lora"
tokenizer = AutoTokenizer.from_pretrained(model_id)
text = "Hello"
inputs = tokenizer(text, return_tensors="pt")

model = AutoModelForCausalLM.from_pretrained(model_id)
peft_config = PeftConfig.from_pretrained(adapter_model_id)

# to initiate with random weights
peft_config.init_lora_weights = False

model.add_adapter(peft_config)
model.enable_adapters()
output = model.generate(**inputs)
```

要禁用adapter模块:

```py
model.disable_adapters()
output = model.generate(**inputs)
```

## 训练一个 PEFT adapter

PEFT适配器受[`Trainer`]类支持,因此您可以为您的特定用例训练适配器。它只需要添加几行代码即可。例如,要训练一个LoRA adapter:

<Tip>

如果你不熟悉如何使用[`Trainer`]微调模型,请查看[微调预训练模型](training)教程。

</Tip>

1. 使用任务类型和超参数定义adapter配置(参见[`~peft.LoraConfig`]以了解超参数的详细信息)。

```py
from peft import LoraConfig

peft_config = LoraConfig(
    lora_alpha=16,
    lora_dropout=0.1,
    r=64,
    bias="none",
    task_type="CAUSAL_LM",
)
```

2. 将adapter添加到模型中。

```py
model.add_adapter(peft_config)
```

3. 现在可以将模型传递给[`Trainer`]了!

```py
trainer = Trainer(model=model, ...)
trainer.train()
```

要保存训练好的adapter并重新加载它:

```py
model.save_pretrained(save_dir)
model = AutoModelForCausalLM.from_pretrained(save_dir)
```

<!-- TODO: (@younesbelkada @stevhliu)
-   Link to PEFT docs for further details
-   Trainer
-   8-bit / 4-bit examples ?
-->
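
Putting the training steps together, the sketch below attaches a LoRA adapter to a base model, runs one short `Trainer` pass on a toy dataset, and saves the adapter. The dataset, output directory, and hyperparameters are illustrative assumptions only.

```py
from datasets import Dataset
from peft import LoraConfig
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

model_id = "facebook/opt-350m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Attach a LoRA adapter, following the same pattern as above.
peft_config = LoraConfig(r=16, lora_alpha=16, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM")
model.add_adapter(peft_config)

# A tiny toy dataset so the sketch runs end to end.
texts = ["Hello, PEFT!", "Adapters make fine-tuning cheaper."]
train_dataset = Dataset.from_dict({"text": texts}).map(
    lambda example: tokenizer(example["text"], truncation=True, max_length=32),
    remove_columns=["text"],
)

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="opt-350m-lora-demo",
        per_device_train_batch_size=2,
        num_train_epochs=1,
        report_to="none",
    ),
    train_dataset=train_dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()

# Save the trained adapter and reload it later, as shown above.
model.save_pretrained("opt-350m-lora-demo")
reloaded = AutoModelForCausalLM.from_pretrained("opt-350m-lora-demo")
```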
transformers/docs/source/zh/peft.md/0
{ "file_path": "transformers/docs/source/zh/peft.md", "repo_id": "transformers", "token_count": 3628 }
284
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Transformers Agents <Tip warning={true}> `Transformers Agents`是一个实验性的随时可能发生变化的API。由于API或底层模型可能发生变化,`agents`返回的结果也会有所不同。 </Tip> Transformers版本`v4.29.0`基于`tools`和`agents`概念构建。您可以在[此Colab链接](https://colab.research.google.com/drive/1c7MHD-T1forUPGcC_jlwsIptOzpG3hSj)中进行测试。 简而言之,它在`Transformers`之上提供了一个自然语言API:我们定义了一组经过筛选的`tools`,并设计了一个`agents`来解读自然语言并使用这些工具。它具有很强的可扩展性;我们筛选了一些相关的`tools`,但我们将向您展示如何通过社区开发的`tool`轻松地扩展系统。 让我们从一些可以通过这个新API实现的示例开始。在处理多模态任务时它尤其强大,因此让我们快速试着生成图像并大声朗读文本。 ```py agent.run("Caption the following image", image=image) ``` | **输入** | **输出** | |-----------------------------------------------------------------------------------------------------------------------------|-----------------------------------| | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/beaver.png" width=200> | A beaver is swimming in the water | --- ```py agent.run("Read the following text out loud", text=text) ``` | **输入** | **输出** | |-----------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | A beaver is swimming in the water | <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tts_example.wav" type="audio/wav"> your browser does not support the audio element. 
</audio> --- ```py agent.run( "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?", document=document, ) ``` | **输入** | **输出** | |-----------------------------------------------------------------------------------------------------------------------------|----------------| | <img src="https://datasets-server.huggingface.co/assets/hf-internal-testing/example-documents/--/hf-internal-testing--example-documents/test/0/image/image.jpg" width=200> | ballroom foyer | ## 快速入门 要使用 `agent.run`,您需要实例化一个`agent`,它是一个大型语言模型(LLM)。我们支持OpenAI模型以及来自BigCode和OpenAssistant的开源替代方案。OpenAI模型性能更好(但需要您拥有OpenAI API密钥,因此无法免费使用),Hugging Face为BigCode和OpenAssistant模型提供了免费访问端点。 一开始请安装`agents`附加模块,以安装所有默认依赖项。 ```bash pip install transformers[agents] ``` 要使用OpenAI模型,您可以在安装`openai`依赖项后实例化一个`OpenAiAgent`: ```bash pip install openai ``` ```py from transformers import OpenAiAgent agent = OpenAiAgent(model="text-davinci-003", api_key="<your_api_key>") ``` 要使用BigCode或OpenAssistant,请首先登录以访问Inference API: ```py from huggingface_hub import login login("<YOUR_TOKEN>") ``` 然后,实例化`agent`: ```py from transformers import HfAgent # Starcoder agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") # StarcoderBase # agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoderbase") # OpenAssistant # agent = HfAgent(url_endpoint="https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5") ``` 此示例使用了目前Hugging Face免费提供的推理API。如果你有自己的推理端点用于此模型(或其他模型),你可以用你的URL替换上面的URL。 <Tip> StarCoder和OpenAssistant可以免费使用,并且在简单任务上表现出色。然而,当处理更复杂的提示时就不再有效。如果你遇到这样的问题,我们建议尝试使用OpenAI模型,尽管遗憾的是它不是开源的,但它在目前情况下表现更好。 </Tip> 现在,您已经可以开始使用了!让我们深入了解您现在可以使用的两个API。 ### 单次执行(run) 单次执行方法是使用`agent`的 `~Agent.run`: ```py agent.run("Draw me a picture of rivers and lakes.") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200> 它会自动选择适合您要执行的任务的`tool`(或`tools`),并以适当的方式运行它们。它可以在同一指令中执行一个或多个任务(尽管您的指令越复杂,`agent`失败的可能性就越大)。 ```py agent.run("Draw me a picture of the sea then transform the picture to add an island") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sea_and_island.png" width=200> <br/> 每个 [`~Agent.run`] 操作都是独立的,因此您可以多次连续运行 [`~Agent.run`]并执行不同的任务。 请注意,您的 `agent` 只是一个大型语言模型,因此您略有变化的提示可能会产生完全不同的结果。重要的是尽可能清晰地解释您要执行的任务。我们在[这里](../en/custom_tools#writing-good-user-inputs)更深入地讨论了如何编写良好的提示。 如果您想在多次执行之间保持同一状态或向`agent`传递非文本对象,可以通过指定`agent`要使用的变量来实现。例如,您可以生成有关河流和湖泊的第一幅图像,并要求模型通过执行以下操作向该图片添加一个岛屿: ```python picture = agent.run("Generate a picture of rivers and lakes.") updated_picture = agent.run("Transform the image in `picture` to add an island to it.", picture=picture) ``` <Tip> 当模型无法理解您的请求和库中的工具时,这可能会有所帮助。例如: ```py agent.run("Draw me the picture of a capybara swimming in the sea") ``` 在这种情况下,模型可以以两种方式理解您的请求: - 使用`text-to-image` 生成在大海中游泳的大水獭 - 或者,使用`text-to-image`生成大水獭,然后使用`image-transformation`工具使其在大海中游泳 如果您想强制使用第一种情景,可以通过将提示作为参数传递给它来实现: ```py agent.run("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea") ``` </Tip> ### 基于交流的执行 (chat) 基于交流的执行(chat)方式是使用 [`~Agent.chat`]: ```py agent.chat("Generate a picture of rivers and lakes") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200> ```py agent.chat("Transform the picture so that there is a rock in there") ``` <img 
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_and_beaver.png" width=200> <br/> 当您希望在不同指令之间保持同一状态时,这会是一个有趣的方法。它更适合用于单个指令,而不是复杂的多步指令(`~Agent.run` 方法更适合处理这种情况)。 这种方法也可以接受参数,以便您可以传递非文本类型或特定提示。 ### ⚠️ 远程执行 出于演示目的以便适用于所有设置,我们为发布版本的少数默认工具创建了远程执行器。这些工具是使用推理终端(inference endpoints)创建的。 目前我们已将其关闭,但为了了解如何自行设置远程执行器工具,我们建议阅读[自定义工具指南](./custom_tools)。 ### 这里发生了什么?什么是`tools`,什么是`agents`? <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/diagram.png"> #### Agents 这里的`Agents`是一个大型语言模型,我们通过提示它以访问特定的工具集。 大型语言模型在生成小代码示例方面表现出色,因此这个API利用这一特点,通过提示LLM生成一个使用`tools`集合的小代码示例。然后,根据您给`Agents`的任务和`tools`的描述来完成此提示。这种方式让它能够访问工具的文档,特别是它们的期望输入和输出,以生成相关的代码。 #### Tools `Tools`非常简单:它们是有名称和描述的单个函数。然后,我们使用这些`tools`的描述来提示代理。通过提示,我们向`agent`展示如何使用`tool`来执行查询语言中请求的操作。 这是使用全新`tools`而不是`pipelines`,因为`agent`编写的代码更好,具有非常原子化的`tools`。`pipelines`经常被重构,并且通常将多个任务合并为一个。`tools`旨在专注于一个非常简单的任务。 #### 代码执行? 然后,这段代码基于`tools`的输入被我们的小型Python解释器执行。我们听到你在后面大声呼喊“任意代码执行!”,但让我们解释为什么情况并非如此。 只能您提供的`tools`和打印函数可以被执行,因此您已经受到了执行的限制。如果仅限于 Hugging Face 工具,那么您应该是安全的。 然后,我们不允许任何属性查找或导入(无论如何都不需要将输入/输出传递给一小组函数),因此所有最明显的攻击(并且您需要提示LLM无论如何输出它们)不应该是一个问题。如果你想超级安全,你可以使用附加参数 return_code=True 执行 run() 方法,在这种情况下,`agent`将只返回要执行的代码,你可以决定是否执行。 如果`agent`生成的代码存在任何尝试执行非法操作的行为,或者代码中出现了常规Python错误,执行将停止。 ### 一组经过精心筛选的`tools` 我们确定了一组可以赋予这些`agent`强大能力的`tools`。以下是我们在`transformers`中集成的`tools`的更新列表: - **文档问答**:给定一个图像格式的文档(例如PDF),回答该文档上的问题([Donut](../en/model_doc/donut)) - **文本问答**:给定一段长文本和一个问题,回答文本中的问题([Flan-T5](../en/model_doc/flan-t5)) - **无条件图像字幕**:为图像添加字幕!([BLIP](../en/model_doc/blip)) - **图像问答**:给定一张图像,回答该图像上的问题([VILT](../en/model_doc/vilt)) - **图像分割**:给定一张图像和一个提示,输出该提示的分割掩模([CLIPSeg](../en/model_doc/clipseg)) - **语音转文本**:给定一个人说话的音频录音,将演讲内容转录为文本([Whisper](../en/model_doc/whisper)) - **文本转语音**:将文本转换为语音([SpeechT5](../en/model_doc/speecht5)) - **Zero-Shot文本分类**:给定一个文本和一个标签列表,确定文本最符合哪个标签([BART](../en/model_doc/bart)) - **文本摘要**:总结长文本为一两句话([BART](../en/model_doc/bart)) - **翻译**:将文本翻译为指定语言([NLLB](../en/model_doc/nllb)) 这些`tools`已在transformers中集成,并且也可以手动使用,例如: ```py from transformers import load_tool tool = load_tool("text-to-speech") audio = tool("This is a text to speech tool") ``` ### 自定义工具 尽管我们确定了一组经过筛选的`tools`,但我们坚信,此实现提供的主要价值在于能够快速创建和共享自定义`tool`。 通过将工具的代码上传到Hugging Face空间或模型repository,您可以直接通过`agent`使用`tools`。我们已经添加了一些**与transformers无关**的`tools`到[`huggingface-tools`组织](https://huggingface.co/huggingface-tools)中: - **文本下载器**:从Web URL下载文本 - **文本到图像**:根据提示生成图像,利用`stable diffusion` - **图像转换**:根据初始图像和提示修改图像,利用`instruct pix2pix stable diffusion` - **文本到视频**:根据提示生成小视频,利用`damo-vilab` 从一开始就一直在使用的文本到图像`tool`是一个远程`tool `,位于[*huggingface-tools/text-to-image*](https://huggingface.co/spaces/huggingface-tools/text-to-image)!我们将继续在此组织和其他组织上发布此类`tool`,以进一步增强此实现。 `agents`默认可以访问存储在[`huggingface-tools`](https://huggingface.co/huggingface-tools)上的`tools`。我们将在后续指南中解释如何编写和共享自定义`tools`,以及如何利用Hub上存在的任何自定义`tools`。 ### 代码生成 到目前为止,我们已经展示了如何使用`agents`来为您执行操作。但是,`agents`仅使用非常受限Python解释器执行的代码。如果您希望在不同的环境中使用生成的代码,可以提示`agents`返回代码,以及`tools`的定义和准确的导入信息。 例如,以下指令 ```python agent.run("Draw me a picture of rivers and lakes", return_code=True) ``` 返回以下代码 ```python from transformers import load_tool image_generator = load_tool("huggingface-tools/text-to-image") image = image_generator(prompt="rivers and lakes") ``` 然后你就可以调整并执行代码
transformers/docs/source/zh/transformers_agents.md/0
{ "file_path": "transformers/docs/source/zh/transformers_agents.md", "repo_id": "transformers", "token_count": 8118 }
285
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Image Classification training examples The following example showcases how to train/fine-tune `ViT` for image-classification using the JAX/Flax backend. JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism. In this example we will train/fine-tune the model on the [imagenette](https://github.com/fastai/imagenette) dataset. ## Prepare the dataset We will use the [imagenette](https://github.com/fastai/imagenette) dataset to train/fine-tune our model. Imagenette is a subset of 10 easily classified classes from Imagenet (tench, English springer, cassette player, chain saw, church, French horn, garbage truck, gas pump, golf ball, parachute). ### Download and extract the data. ```bash wget https://s3.amazonaws.com/fast-ai-imageclas/imagenette2.tgz tar -xvzf imagenette2.tgz ``` This will create a `imagenette2` dir with two subdirectories `train` and `val` each with multiple subdirectories per class. The training script expects the following directory structure ```bash root/dog/xxx.png root/dog/xxy.png root/dog/[...]/xxz.png root/cat/123.png root/cat/nsdf3.png root/cat/[...]/asd932_.png ``` ## Train the model Next we can run the example script to fine-tune the model: ```bash python run_image_classification.py \ --output_dir ./vit-base-patch16-imagenette \ --model_name_or_path google/vit-base-patch16-224-in21k \ --train_dir="imagenette2/train" \ --validation_dir="imagenette2/val" \ --num_train_epochs 5 \ --learning_rate 1e-3 \ --per_device_train_batch_size 128 --per_device_eval_batch_size 128 \ --overwrite_output_dir \ --preprocessing_num_workers 32 \ --push_to_hub ``` This should finish in ~7mins with 99% validation accuracy.
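
Once training finishes, the fine-tuned checkpoint can be used for inference directly from Flax. The snippet below is a sketch: it assumes the output directory from the command above contains the saved Flax weights, and that the base checkpoint's image processor settings match the preprocessing used during training.

```python
import requests
from PIL import Image
from transformers import AutoImageProcessor, FlaxViTForImageClassification

# Image processor from the base checkpoint; model weights from the fine-tuned output dir.
processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
model = FlaxViTForImageClassification.from_pretrained("./vit-base-patch16-imagenette")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="np")
logits = model(**inputs).logits
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```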
transformers/examples/flax/vision/README.md/0
{ "file_path": "transformers/examples/flax/vision/README.md", "repo_id": "transformers", "token_count": 775 }
286
#!/usr/bin/env bash if ! [ -f ./dev.txt ]; then echo "Download dev dataset...." curl -L -o ./dev.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-dev.conllu' fi if ! [ -f ./test.txt ]; then echo "Download test dataset...." curl -L -o ./test.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-test.conllu' fi if ! [ -f ./train.txt ]; then echo "Download train dataset...." curl -L -o ./train.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-train.conllu' fi export MAX_LENGTH=200 export BERT_MODEL=bert-base-uncased export OUTPUT_DIR=postagger-model export BATCH_SIZE=32 export NUM_EPOCHS=3 export SAVE_STEPS=750 export SEED=1 # Add parent directory to python path to access lightning_base.py export PYTHONPATH="../":"${PYTHONPATH}" python3 run_ner.py --data_dir ./ \ --task_type POS \ --model_name_or_path $BERT_MODEL \ --output_dir $OUTPUT_DIR \ --max_seq_length $MAX_LENGTH \ --num_train_epochs $NUM_EPOCHS \ --train_batch_size $BATCH_SIZE \ --seed $SEED \ --gpus 1 \ --do_train \ --do_predict
transformers/examples/legacy/pytorch-lightning/run_pos.sh/0
{ "file_path": "transformers/examples/legacy/pytorch-lightning/run_pos.sh", "repo_id": "transformers", "token_count": 440 }
287
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seq2seq_trainer import Seq2SeqTrainer from seq2seq_training_args import Seq2SeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( Seq2SeqDataCollator, Seq2SeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_encoder: bool = field(default=False, metadata={"help": "Whether tp freeze the encoder."}) freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."}) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ data_dir: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) task: Optional[str] = field( default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=142, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." 
) }, ) test_max_target_length: Optional[int] = field( default=142, metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."}) n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."}) n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."}) src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."}) tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."}) eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."}) ignore_pad_token_for_loss: bool = field( default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, ) def handle_metrics(split, metrics, output_dir): """ Log and save metrics Args: - split: one of train, val, test - metrics: metrics dict - output_dir: where to save the metrics """ logger.info(f"***** {split} metrics *****") for key in sorted(metrics.keys()): logger.info(f" {key} = {metrics[key]}") save_json(metrics, os.path.join(output_dir, f"{split}_results.json")) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() check_output_dir(training_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(training_args, p, None): assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(config, p, getattr(training_args, p)) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, ) # use task specific params use_task_specific_params(model, data_args.task) # set num_beams for evaluation if data_args.eval_beams is None: data_args.eval_beams = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang) if model_args.freeze_embeds: freeze_embeds(model) if model_args.freeze_encoder: freeze_params(model.get_encoder()) assert_all_frozen(model.get_encoder()) dataset_class = Seq2SeqDataset # Get datasets train_dataset = ( dataset_class( tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_train else None ) eval_dataset = ( dataset_class( tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) test_dataset = ( dataset_class( tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_predict else None ) # Initialize our Trainer compute_metrics_fn = ( build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None ) trainer = Seq2SeqTrainer( model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator( tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, ) all_metrics = {} # Training if training_args.do_train: logger.info("*** Train ***") train_result = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) metrics = train_result.metrics metrics["train_n_objs"] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train", metrics, training_args.output_dir) all_metrics.update(metrics) # Need to save the 
state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(metric_key_prefix="val") metrics["val_n_objs"] = data_args.n_val metrics["val_loss"] = round(metrics["val_loss"], 4) if trainer.is_world_process_zero(): handle_metrics("val", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.do_predict: logger.info("*** Predict ***") test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test") metrics = test_output.metrics metrics["test_n_objs"] = data_args.n_test if trainer.is_world_process_zero(): metrics["test_loss"] = round(metrics["test_loss"], 4) handle_metrics("test", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.predict_with_generate: test_preds = tokenizer.batch_decode( test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) test_preds = lmap(str.strip, test_preds) write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt")) if trainer.is_world_process_zero(): save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json")) return all_metrics def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
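# ---------------------------------------------------------------------------
# Example invocation (a sketch only -- the checkpoint, data directory and
# hyperparameters below are illustrative assumptions, not defaults shipped
# with this script). `--data_dir` is expected to contain line-aligned
# {train,val,test}.source and {train,val,test}.target text files.
#
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/student_cnn_12_6 \
#       --data_dir cnn_dm \
#       --output_dir distilbart-cnn-demo --overwrite_output_dir \
#       --do_train --do_eval --predict_with_generate \
#       --per_device_train_batch_size 8 --num_train_epochs 1
# ---------------------------------------------------------------------------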
transformers/examples/legacy/seq2seq/finetune_trainer.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/finetune_trainer.py", "repo_id": "transformers", "token_count": 5726 }
288
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. export WANDB_PROJECT=distilbart-trainer export BS=32 export m=sshleifer/student_cnn_12_6 export tok=facebook/bart-large export MAX_TGT_LEN=142 python finetune_trainer.py \ --model_name_or_path $m --tokenizer_name $tok \ --data_dir cnn_dm \ --output_dir distilbart-cnn-12-6 --overwrite_output_dir \ --learning_rate=3e-5 \ --warmup_steps 500 --sortish_sampler \ --fp16 \ --n_val 500 \ --gradient_accumulation_steps=1 \ --per_device_train_batch_size=$BS --per_device_eval_batch_size=$BS \ --freeze_encoder --freeze_embeds \ --num_train_epochs=2 \ --save_steps 3000 --eval_steps 3000 \ --logging_first_step \ --max_target_length 56 --val_max_target_length $MAX_TGT_LEN --test_max_target_length $MAX_TGT_LEN\ --do_train --do_eval --do_predict \ --evaluation_strategy steps \ --predict_with_generate --sortish_sampler \ "$@"
transformers/examples/legacy/seq2seq/train_distilbart_cnn.sh/0
{ "file_path": "transformers/examples/legacy/seq2seq/train_distilbart_cnn.sh", "repo_id": "transformers", "token_count": 541 }
289
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000): """Randomly sample chunks of `max_length` seconds from the input audio""" sample_length = int(round(sample_rate * max_length)) if len(wav) <= sample_length: return wav random_offset = randint(0, len(wav) - sample_length - 1) return wav[random_offset : random_offset + sample_length] @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"}) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "A file containing the training audio paths and labels."} ) eval_file: Optional[str] = field( default=None, metadata={"help": "A file containing the validation audio paths and labels."} ) train_split_name: str = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) eval_split_name: str = field( default="validation", metadata={ "help": ( "The name of the training data set split to use (via the datasets library). Defaults to 'validation'" ) }, ) audio_column_name: str = field( default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, ) label_column_name: str = field( default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_length_seconds: float = field( default=20, metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."}, ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="facebook/wav2vec2-base", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) feature_extractor_name: Optional[str] = field( default=None, metadata={"help": "Name or path of preprocessor config."} ) freeze_feature_encoder: bool = field( default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) attention_mask: bool = field( default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."} ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) freeze_feature_extractor: Optional[bool] = field( default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def __post_init__(self): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( "The argument `--freeze_feature_extractor` is deprecated and " "will be removed in a future version. Use `--freeze_feature_encoder` " "instead. Setting `freeze_feature_encoder==True`.", FutureWarning, ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( "The argument `--freeze_feature_extractor` is deprecated and " "should not be used in combination with `--freeze_feature_encoder`. " "Only make use of `--freeze_feature_encoder`." ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_audio_classification", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to train from scratch." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset and prepare it for the audio classification task. raw_datasets = DatasetDict() raw_datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, token=model_args.token, ) raw_datasets["eval"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, token=model_args.token, ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. 
" "Make sure to set `--audio_column_name` to the correct audio column - one of " f"{', '.join(raw_datasets['train'].column_names)}." ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--label_column_name` to the correct text column - one of " f"{', '.join(raw_datasets['train'].column_names)}." ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. raw_datasets = raw_datasets.cast_column( data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) ) model_input_name = feature_extractor.model_input_names[0] def train_transforms(batch): """Apply train_transforms across a batch.""" subsampled_wavs = [] for audio in batch[data_args.audio_column_name]: wav = random_subsample( audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(wav) inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate) output_batch = {model_input_name: inputs.get(model_input_name)} output_batch["labels"] = list(batch[data_args.label_column_name]) return output_batch def val_transforms(batch): """Apply val_transforms across a batch.""" wavs = [audio["array"] for audio in batch[data_args.audio_column_name]] inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate) output_batch = {model_input_name: inputs.get(model_input_name)} output_batch["labels"] = list(batch[data_args.label_column_name]) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. labels = raw_datasets["train"].features[data_args.label_column_name].names label2id, id2label = {}, {} for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label # Load the accuracy metric from the datasets package metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(eval_pred): """Computes accuracy on a batch of predictions""" predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids) config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: raw_datasets["train"] = ( raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the training transforms raw_datasets["train"].set_transform(train_transforms, output_all_columns=False) if training_args.do_eval: if data_args.max_eval_samples is not None: raw_datasets["eval"] = ( raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False) # Initialize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
transformers/examples/pytorch/audio-classification/run_audio_classification.py/0
{ "file_path": "transformers/examples/pytorch/audio-classification/run_audio_classification.py", "repo_id": "transformers", "token_count": 7223 }
290
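The training transform in the audio-classification script above relies on `random_subsample` to crop long clips before feature extraction. The following is a minimal, self-contained sketch of that idea on synthetic data; the noise input, the 16 kHz rate and the variable names are illustrative assumptions, not part of the original script.

from random import randint

import numpy as np


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly crop `wav` to at most `max_length` seconds."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav  # short clips are kept as-is
    offset = randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]


sr = 16000
wav = np.random.randn(sr * 30)  # 30 s of noise standing in for a real clip (assumption)
crop = random_subsample(wav, max_length=20.0, sample_rate=sr)
print(wav.shape, crop.shape)  # (480000,) (320000,)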
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=text-generation """ # You can also adapt this script on your own causal language modeling task. Pointers for this are left as comments. import logging import math import os import sys import warnings from dataclasses import dataclass, field from itertools import chain from typing import Optional import datasets import evaluate import torch from datasets import load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, is_torch_xla_available, set_seed, ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.40.0.dev0") require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. 
Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) torch_dtype: Optional[str] = field( default=None, metadata={ "help": ( "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the " "dtype will be automatically derived from the model's weights." ), "choices": ["auto", "bfloat16", "float16", "float32"], }, ) low_cpu_mem_usage: bool = field( default=False, metadata={ "help": ( "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. " "set True will benefit LLM loading time and RAM consumption." ) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) }, ) streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"}) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) keep_linebreaks: bool = field( default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."} ) def __post_init__(self): if self.streaming: require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`") if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, streaming=data_args.streaming, ) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, token=model_args.token, streaming=data_args.streaming, ) raw_datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, token=model_args.token, streaming=data_args.streaming, ) else: data_files = {} dataset_args = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = ( data_args.train_file.split(".")[-1] if data_args.train_file is not None else data_args.validation_file.split(".")[-1] ) if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = data_args.keep_linebreaks raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, **dataset_args, ) # If no validation data is there, validation_split_percentage will be used to divide the dataset. 
if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, token=model_args.token, **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, token=model_args.token, **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "token": model_args.token, "trust_remote_code": model_args.trust_remote_code, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "token": model_args.token, "trust_remote_code": model_args.trust_remote_code, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, torch_dtype=torch_dtype, low_cpu_mem_usage=model_args.low_cpu_mem_usage, ) else: model = AutoModelForCausalLM.from_config(config, trust_remote_code=model_args.trust_remote_code) n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. 
if training_args.do_train: column_names = list(raw_datasets["train"].features) else: column_names = list(raw_datasets["validation"].features) text_column_name = "text" if "text" in column_names else column_names[0] # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples[text_column_name]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output with training_args.main_process_first(desc="dataset map tokenization"): if not data_args.streaming: tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) else: tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, remove_columns=column_names, ) if hasattr(config, "max_position_embeddings"): max_pos_embeddings = config.max_position_embeddings else: # Define a default value if the attribute is missing in the config. max_pos_embeddings = 1024 if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > max_pos_embeddings: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " f"Using block_size={min(1024, max_pos_embeddings)} instead. You can change that default value by passing --block_size xxx." ) if max_pos_embeddings > 0: block_size = min(1024, max_pos_embeddings) else: block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model " f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, and if the total_length < block_size we exclude this batch and return an empty dict. # We could add padding if the model supported it instead of this drop, you can customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. 
See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/process#map with training_args.main_process_first(desc="grouping texts together"): if not data_args.streaming: lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", ) else: lm_datasets = tokenized_datasets.map( group_texts, batched=True, ) if training_args.do_train: if "train" not in tokenized_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = lm_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in tokenized_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = lm_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come first logits = logits[0] return logits.argmax(dim=-1) metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) preds = preds[:, :-1].reshape(-1) return metric.compute(predictions=preds, references=labels) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. 
data_collator=default_data_collator, compute_metrics=compute_metrics if training_args.do_eval and not is_torch_xla_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_xla_available() else None, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/pytorch/language-modeling/run_clm.py/0
{ "file_path": "transformers/examples/pytorch/language-modeling/run_clm.py", "repo_id": "transformers", "token_count": 12007 }
291
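A minimal, self-contained sketch of the `group_texts` packing step used by the causal-LM script above: tokenized examples are concatenated and re-split into fixed-size blocks, and the labels are a copy of the inputs (the causal shift happens inside the model). The tiny `block_size` and the fake token ids below are assumptions made purely for illustration.

from itertools import chain

block_size = 4  # the real script defaults to min(1024, model max length)


def group_texts(examples: dict) -> dict:
    # Concatenate all token lists, drop the remainder, and re-split into blocks.
    concatenated = {k: list(chain(*examples[k])) for k in examples.keys()}
    total_length = len(concatenated[list(examples.keys())[0]])
    total_length = (total_length // block_size) * block_size
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }
    result["labels"] = result["input_ids"].copy()
    return result


batch = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}
print(group_texts(batch))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]], 'labels': [[1, 2, 3, 4], [5, 6, 7, 8]]}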
#! /usr/bin/python3 import argparse import logging import os import sys from collections import namedtuple import torch from modeling_bertabs import BertAbs, build_predictor from torch.utils.data import DataLoader, SequentialSampler from tqdm import tqdm from transformers import BertTokenizer from .utils_summarization import ( CNNDMDataset, build_mask, compute_token_type_ids, encode_for_summarization, truncate_or_pad, ) logger = logging.getLogger(__name__) logging.basicConfig(stream=sys.stdout, level=logging.INFO) Batch = namedtuple("Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"]) def evaluate(args): tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", do_lower_case=True) model = BertAbs.from_pretrained("remi/bertabs-finetuned-extractive-abstractive-summarization") model.to(args.device) model.eval() symbols = { "BOS": tokenizer.vocab["[unused0]"], "EOS": tokenizer.vocab["[unused1]"], "PAD": tokenizer.vocab["[PAD]"], } if args.compute_rouge: reference_summaries = [] generated_summaries = [] import nltk import rouge nltk.download("punkt") rouge_evaluator = rouge.Rouge( metrics=["rouge-n", "rouge-l"], max_n=2, limit_length=True, length_limit=args.beam_size, length_limit_type="words", apply_avg=True, apply_best=False, alpha=0.5, # Default F1_score weight_factor=1.2, stemming=True, ) # these (unused) arguments are defined to keep the compatibility # with the legacy code and will be deleted in a next iteration. args.result_path = "" args.temp_dir = "" data_iterator = build_data_iterator(args, tokenizer) predictor = build_predictor(args, tokenizer, symbols, model) logger.info("***** Running evaluation *****") logger.info(" Number examples = %d", len(data_iterator.dataset)) logger.info(" Batch size = %d", args.batch_size) logger.info("") logger.info("***** Beam Search parameters *****") logger.info(" Beam size = %d", args.beam_size) logger.info(" Minimum length = %d", args.min_length) logger.info(" Maximum length = %d", args.max_length) logger.info(" Alpha (length penalty) = %.2f", args.alpha) logger.info(" Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT")) for batch in tqdm(data_iterator): batch_data = predictor.translate_batch(batch) translations = predictor.from_batch(batch_data) summaries = [format_summary(t) for t in translations] save_summaries(summaries, args.summaries_output_dir, batch.document_names) if args.compute_rouge: reference_summaries += batch.tgt_str generated_summaries += summaries if args.compute_rouge: scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries) str_scores = format_rouge_scores(scores) save_rouge_scores(str_scores) print(str_scores) def save_summaries(summaries, path, original_document_name): """Write the summaries in fies that are prefixed by the original files' name with the `_summary` appended. Attributes: original_document_names: List[string] Name of the document that was summarized. path: string Path were the summaries will be written summaries: List[string] The summaries that we produced. """ for summary, document_name in zip(summaries, original_document_name): # Prepare the summary file's name if "." in document_name: bare_document_name = ".".join(document_name.split(".")[:-1]) extension = document_name.split(".")[-1] name = bare_document_name + "_summary." 
+ extension else: name = document_name + "_summary" file_path = os.path.join(path, name) with open(file_path, "w") as output: output.write(summary) def format_summary(translation): """Transforms the output of the `from_batch` function into nicely formatted summaries. """ raw_summary, _, _ = translation summary = ( raw_summary.replace("[unused0]", "") .replace("[unused3]", "") .replace("[PAD]", "") .replace("[unused1]", "") .replace(r" +", " ") .replace(" [unused2] ", ". ") .replace("[unused2]", "") .strip() ) return summary def format_rouge_scores(scores): return """\n ****** ROUGE SCORES ****** ** ROUGE 1 F1 >> {:.3f} Precision >> {:.3f} Recall >> {:.3f} ** ROUGE 2 F1 >> {:.3f} Precision >> {:.3f} Recall >> {:.3f} ** ROUGE L F1 >> {:.3f} Precision >> {:.3f} Recall >> {:.3f}""".format( scores["rouge-1"]["f"], scores["rouge-1"]["p"], scores["rouge-1"]["r"], scores["rouge-2"]["f"], scores["rouge-2"]["p"], scores["rouge-2"]["r"], scores["rouge-l"]["f"], scores["rouge-l"]["p"], scores["rouge-l"]["r"], ) def save_rouge_scores(str_scores): with open("rouge_scores.txt", "w") as output: output.write(str_scores) # # LOAD the dataset # def build_data_iterator(args, tokenizer): dataset = load_and_cache_examples(args, tokenizer) sampler = SequentialSampler(dataset) def collate_fn(data): return collate(data, tokenizer, block_size=512, device=args.device) iterator = DataLoader( dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate_fn, ) return iterator def load_and_cache_examples(args, tokenizer): dataset = CNNDMDataset(args.documents_dir) return dataset def collate(data, tokenizer, block_size, device): """Collate formats the data passed to the data loader. In particular we tokenize the data batch after batch to avoid keeping them all in memory. We output the data as a namedtuple to fit the original BertAbs's API. """ data = [x for x in data if not len(x[1]) == 0] # remove empty_files names = [name for name, _, _ in data] summaries = [" ".join(summary_list) for _, _, summary_list in data] encoded_text = [encode_for_summarization(story, summary, tokenizer) for _, story, summary in data] encoded_stories = torch.tensor( [truncate_or_pad(story, block_size, tokenizer.pad_token_id) for story, _ in encoded_text] ) encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id) encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id) batch = Batch( document_names=names, batch_size=len(encoded_stories), src=encoded_stories.to(device), segs=encoder_token_type_ids.to(device), mask_src=encoder_mask.to(device), tgt_str=summaries, ) return batch def decode_summary(summary_tokens, tokenizer): """Decode the summary and return it in a format suitable for evaluation. """ summary_tokens = summary_tokens.to("cpu").numpy() summary = tokenizer.decode(summary_tokens) sentences = summary.split(".") sentences = [s + "." for s in sentences] return sentences def main(): """The main function defines the interface with the users.""" parser = argparse.ArgumentParser() parser.add_argument( "--documents_dir", default=None, type=str, required=True, help="The folder where the documents to summarize are located.", ) parser.add_argument( "--summaries_output_dir", default=None, type=str, required=False, help="The folder in wich the summaries should be written. Defaults to the folder where the documents are", ) parser.add_argument( "--compute_rouge", default=False, type=bool, required=False, help="Compute the ROUGE metrics during evaluation. 
Only available for the CNN/DailyMail dataset.", ) # EVALUATION options parser.add_argument( "--no_cuda", default=False, type=bool, help="Whether to force the execution on CPU.", ) parser.add_argument( "--batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.", ) # BEAM SEARCH arguments parser.add_argument( "--min_length", default=50, type=int, help="Minimum number of tokens for the summaries.", ) parser.add_argument( "--max_length", default=200, type=int, help="Maximum number of tokens for the summaries.", ) parser.add_argument( "--beam_size", default=5, type=int, help="The number of beams to start with for each example.", ) parser.add_argument( "--alpha", default=0.95, type=float, help="The value of alpha for the length penalty in the beam search.", ) parser.add_argument( "--block_trigram", default=True, type=bool, help="Whether to block the existence of repeating trigrams in the text generated by beam search.", ) args = parser.parse_args() # Select device (distributed not available) args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") # Check the existence of directories if not args.summaries_output_dir: args.summaries_output_dir = args.documents_dir if not documents_dir_is_valid(args.documents_dir): raise FileNotFoundError( "We could not find the directory you specified for the documents to summarize, or it was empty. Please" " specify a valid path." ) os.makedirs(args.summaries_output_dir, exist_ok=True) evaluate(args) def documents_dir_is_valid(path): if not os.path.exists(path): return False file_list = os.listdir(path) if len(file_list) == 0: return False return True if __name__ == "__main__": main()
transformers/examples/research_projects/bertabs/run_summarization.py/0
{ "file_path": "transformers/examples/research_projects/bertabs/run_summarization.py", "repo_id": "transformers", "token_count": 4319 }
292
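As a rough, assumption-laden illustration of the post-processing performed by `format_summary` in the BertAbs script above, the toy snippet below strips BERT's reserved "[unusedX]"/"[PAD]" markers from a raw beam-search output and turns the sentence separator into periods; the input string is invented for the example.

def clean_summary(raw_summary: str) -> str:
    # Remove decoder control tokens and map the sentence separator to a period.
    return (
        raw_summary.replace("[unused0]", "")
        .replace("[unused3]", "")
        .replace("[PAD]", "")
        .replace("[unused1]", "")
        .replace(" [unused2] ", ". ")
        .replace("[unused2]", "")
        .strip()
    )


raw = "[unused0] the cat sat on the mat [unused2] it was happy [unused1] [PAD]"
print(clean_summary(raw))  # "the cat sat on the mat. it was happy"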
import json import multiprocessing as mp import re from collections import defaultdict from functools import partial from typing import Dict, List, Optional, Set, Tuple, Type from datasets import Dataset from datasketch import MinHash, MinHashLSH from dpu_utils.utils.iterators import ThreadedIterator from tqdm import tqdm NON_ALPHA = re.compile("[^A-Za-z_0-9]") # parameters used in DuplicationIndex MIN_NUM_TOKENS = 10 NUM_PERM = 256 def get_min_hash(tokens: List[str]) -> Optional[MinHash]: """Compute the MinHash of a code snippet.""" if len(tokens) < MIN_NUM_TOKENS: return None min_hash = MinHash(num_perm=NUM_PERM) for token in set(tokens): min_hash.update(token.encode()) return min_hash def get_tokens(code: str) -> Set[str]: """Tokenize a code snippet.""" return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0} class DuplicationIndex: def __init__( self, *, duplication_jaccard_threshold: float = 0.85, ): self._duplication_jaccard_threshold = duplication_jaccard_threshold self._num_perm = NUM_PERM self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm) self._duplicate_clusters = defaultdict(set) def add(self, code_key: Tuple, min_hash: MinHash) -> None: """Add a key to _index (MinHashLSH) the min_hash is used to query closest matches based on the jaccard_threshold. The new key is either added to a existing cluster of one close match, or a new cluster is created. The clusters created in this way, depend on the order of add. Args: code_key (Tuple of (index, repo_name, path)): Theoritically any hasbale key. Here we use a tuple to retrieve the information later. min_hash: MinHash of the code_key. """ close_duplicates = self._index.query(min_hash) if code_key in self._index.keys: print(f"Duplicate key {code_key}") return self._index.insert(code_key, min_hash) if len(close_duplicates) > 0: for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(code_key) break else: self._duplicate_clusters[close_duplicates[0]].add(code_key) def get_duplicate_clusters(self) -> List[List[Dict]]: """Export the duplicate clusters. For each cluster, the first element is the base element of the cluster. The base element has an estimation jaccard similarity higher than the threshold with all the other elements. Returns: duplicate_clusters (List[List[Dict]]): List of duplicate clusters. """ duplicate_clusters = [] for base, duplicates in self._duplicate_clusters.items(): cluster = [base] + list(duplicates) # reformat the cluster to be a list of dict cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster] duplicate_clusters.append(cluster) return duplicate_clusters def save(self, filepath) -> None: duplicate_clusters = self.get_duplicate_clusters() with open(filepath, "w") as f: json.dump(duplicate_clusters, f) def _compute_min_hash(element): index, data = element min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0]) if min_hash is not None: return (index, data["repo_name"], data["path"]), min_hash def minhash_iter(dataset_iterator: Type[Dataset]): with mp.Pool() as pool: for data in pool.imap_unordered( _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100, ): if data is not None: yield data def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float): """Find duplicate clusters in the dataset in two steps: 1. Compute MinHash for each code snippet. 
MinHash is a tool for fast jaccard similarity estimation. This step is computed using an asynchronous multiprocessing pool, minhash_iter 2. Find duplicate clusters. The computed MinHash is added sequentially to the DuplicationIndex. This step cannot be parallelized. So using asynchronous thread in the previous step helps to speed up the process. """ di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold) for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)): di.add(filename, min_hash) # Returns a List[Cluster] where Cluster is List[str] with the filenames. return di.get_duplicate_clusters() def jaccard_similarity(code1: str, code2: str) -> float: """Compute the Jaccard similarity of two code snippets.""" tokens1 = get_tokens(code1) tokens2 = get_tokens(code2) return len(tokens1 & tokens2) / len(tokens1 | tokens2) _shared_dataset = None def _find_cluster_extremes_shared(cluster, jaccard_threshold): """Find a reduced cluster such that each code in the origin cluster is similar to at least one code in the reduced cluster. Two codes are similar if their Jaccard similarity is above the threshold. Args: cluster (List[dict]): cluster is a list of dict, each dict contains the following keys: - base_index - repo_name - path This is a typical output of DuplicationIndex.get_duplicate_clusters() jaccard_threshold (float): threshold for Jaccard similarity. Two codes are similar if their Jaccard similarity is above the threshold. Returns: extremes (List[dict]): A reduced representation of the cluster. The field copies is added to each dict. The copies field indicates the number of similar codes in the cluster for a extreme. """ extremes = [] for element1 in cluster: code1 = _shared_dataset[element1["base_index"]]["content"] for element2 in extremes: code2 = _shared_dataset[element2["base_index"]]["content"] if jaccard_similarity(code1, code2) >= jaccard_threshold: element2["copies"] += 1 break else: element1["copies"] = 1 extremes.append(element1) return extremes def find_extremes(cluster_list, dataset, jaccard_threshold): """Call the _find_cluster_extremes_shared function in a parallel fashion. Args: cluster_list (List[List[Dict]]): each cluster is a list of dicts with the key base_index, referring to the index of the base code in the dataset. dataset (Type[Dataset]): dataset is used to access the content of the code snippets, using the base_index from the cluster_list. dataset is shared between all the processes using a glabal variable (any other way to share the dataset?), otherwise the multi processing is not speeded up. jaccard_threshold (float): the threshold for the jaccard similarity. The default value is 0.85 Returns: extremes_list (List[Dict]): Each cluster is reduced to extremes. See _find_cluster_extremes_shared for the definition of extremes. """ global _shared_dataset _shared_dataset = dataset extremes_list = [] f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold) with mp.Pool() as pool: for extremes in tqdm( pool.imap_unordered( f, cluster_list, ), total=len(cluster_list), ): extremes_list.append(extremes) return extremes_list def deduplicate_dataset( dataset: Type[Dataset], jaccard_threshold: float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]: """Deduplicate the dataset using minhash and jaccard similarity. This function first generate duplicate clusters, then each cluster is reduced to the extremes that are similar to the other elements in the cluster. 
Codes are called similar if their Jaccard similarity is greater than jaccard_threshold (0.85 default). Args: dataset (Type[Dataset]): The dataset to deduplicate. jaccard_threshold (float, default=0.85): jaccard threshold to determine if two codes are similar Returns: ds_dedup (Type[Dataset]): The deduplicated dataset. duplicate_clusters (List[List[Dict]]): The list of duplicate clusters. Each cluster is a list of dicts with the following keys: - base_index : int The index of the code in the original dataset. - repo_name : str - path : str - copies : int The number of copies of the code in the cluster. (find_cluster_extremes) - is_extreme : bool Whether the code is an extreme in the cluster. All the codes in the cluster are removed from the dataset except the extremes. Example: >>> from datasets import load_dataset >>> from minhash_deduplication import deduplicate_dataset >>> ds = load_dataset("lvwerra/codeparrot-clean", split="train") >>> ds_dedup, duplicate_clusters = deduplicate_dataset(ds, jaccard_threshold=0.85) """ duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold) duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster} extreme_dict = {} extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold) for extremes in extremes_clusters: for element in extremes: extreme_dict[element["base_index"]] = element remove_indices = duplicate_indices - set(extreme_dict.keys()) ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True) # update duplicate_clusters for cluster in duplicate_clusters: for element in cluster: element["is_extreme"] = element["base_index"] in extreme_dict if element["is_extreme"]: element["copies"] = extreme_dict[element["base_index"]]["copies"] print(f"Original dataset size: {len(dataset)}") print(f"Number of duplicate clusters: {len(duplicate_clusters)}") print(f"Files in duplicate cluster: {len(duplicate_indices)}") print(f"Unique files in duplicate cluster: {len(extreme_dict)}") print(f"Filtered dataset size: {len(ds_filter)}") return ds_filter, duplicate_clusters
transformers/examples/research_projects/codeparrot/scripts/minhash_deduplication.py/0
{ "file_path": "transformers/examples/research_projects/codeparrot/scripts/minhash_deduplication.py", "repo_id": "transformers", "token_count": 4391 }
293
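The deduplication module above builds on `datasketch`. The following small sketch, with invented code snippets and a lowered LSH threshold chosen only for demonstration, shows the two primitives it relies on: MinHash signatures that approximate Jaccard similarity over token sets, and a MinHashLSH index that retrieves near-duplicate keys.

import re

from datasketch import MinHash, MinHashLSH

NON_ALPHA = re.compile("[^A-Za-z_0-9]")


def min_hash_of(code: str, num_perm: int = 256) -> MinHash:
    # Build a MinHash signature over the set of alphanumeric tokens in the snippet.
    m = MinHash(num_perm=num_perm)
    for token in {t for t in NON_ALPHA.split(code) if t.strip()}:
        m.update(token.encode())
    return m


snippet_a = "def add(a, b):\n    return a + b"
snippet_b = "def add(x, y):\n    return x + y"

mh_a, mh_b = min_hash_of(snippet_a), min_hash_of(snippet_b)
print(mh_a.jaccard(mh_b))  # estimate of the true token Jaccard similarity (3/7 here)

lsh = MinHashLSH(threshold=0.3, num_perm=256)
lsh.insert("snippet_a", mh_a)
print(lsh.query(mh_b))  # should report "snippet_a" as a near-duplicate candidate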
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument("-f") args = parser.parse_args() return args.f class DeeBertTests(TestCasePlus): def setup(self) -> None: stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) def run_and_check(self, args): n_gpu = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0, "run_glue_deebert.py") with patch.object(sys, "argv", args): result = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(value, 0.666) @slow @require_torch_non_multi_gpu def test_glue_deebert_train(self): train_args = """ --model_type roberta --model_name_or_path FacebookAI/roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage """.split() self.run_and_check(train_args) eval_args = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(eval_args) entropy_eval_args = """ --model_type roberta --model_name_or_path ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/FacebookAI/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 """.split() self.run_and_check(entropy_eval_args)
transformers/examples/research_projects/deebert/test_glue_deebert.py/0
{ "file_path": "transformers/examples/research_projects/deebert/test_glue_deebert.py", "repo_id": "transformers", "token_count": 1881 }
294
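The `--early_exit_entropy 0.1` flag in the last test exercises DeeBERT's entropy-based early exit: inference stops at the first highway classifier whose prediction entropy falls below the threshold. The repository's implementation lives in the model code; the following is only a hedged, self-contained sketch of the idea, assuming a list of per-layer classifier logits:

```python
# Hedged sketch of entropy-based early exit (illustrative, not DeeBERT's actual code).
import torch


def prediction_entropy(logits: torch.Tensor) -> float:
    # Entropy of the softmax distribution: low entropy means a confident prediction.
    probs = torch.softmax(logits, dim=-1)
    return float(-(probs * probs.log()).sum())


def early_exit(per_layer_logits, entropy_threshold: float = 0.1):
    # Return the first (layer_index, logits) whose entropy is below the threshold,
    # falling back to the final layer if no highway is confident enough.
    for i, logits in enumerate(per_layer_logits):
        if prediction_entropy(logits) < entropy_threshold:
            return i, logits
    return len(per_layer_logits) - 1, per_layer_logits[-1]


layer_logits = [torch.tensor([0.2, 0.3]), torch.tensor([4.0, -4.0])]
print(early_exit(layer_logits))  # exits at layer 1, where the prediction is confident
```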
{ "initializer_range": 0.02, "layer_norm_epsilon": 0.00001, "n_embd": 768, "n_head": 12, "n_layer": 6, "n_positions": 1024, "vocab_size": 50257 }
transformers/examples/research_projects/distillation/training_configs/distilgpt2.json/0
{ "file_path": "transformers/examples/research_projects/distillation/training_configs/distilgpt2.json", "repo_id": "transformers", "token_count": 79 }
295
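The training config above mirrors the constructor arguments of `transformers.GPT2Config`, so a 6-layer DistilGPT2-sized student can be instantiated straight from it. A minimal sketch follows; the file name is inferred from the path above and the snippet is not part of the distillation scripts themselves:

```python
# Hedged sketch: build a randomly initialized student model from the JSON config above.
import json

from transformers import GPT2Config, GPT2LMHeadModel

with open("training_configs/distilgpt2.json") as f:
    params = json.load(f)

config = GPT2Config(**params)    # n_embd, n_head, n_layer, n_positions, vocab_size, ...
model = GPT2LMHeadModel(config)  # fresh weights; distillation training fills them in
print(model.config.n_layer)      # 6
```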
# How to propose a Flax/JAX + Transformers project

Great that you've opened this document! While we at 🤗 are proposing a couple of projects, we strongly believe that the community can come up with much more **creative**, **fun**, and **impactful** projects on their own. This being said, we are really looking forward to seeing your project proposal!

## What a project should be about

The proposed project should fall into the machine learning fields of **Natural Language Processing (NLP)** and/or **Computer Vision (CV)** (possibly also **Speech Recognition (ASR)** depending on whether Speech Recognition models are available in Flax in due time) and aim at solving a specific task. Possible tasks can belong to:

* text classification
* text generation
* image recognition
* image processing
* image captioning
* audio classification
* and other tasks you can think of!

The clearer a task is defined, the better your project proposal is. *E.g.* "Using a T5 model to learn grammar correction in French" or "Adapting a pre-trained CLIP model for zero-shot image classification in Spanish" are **well-defined and clear** project proposals, while something like "Train a language model" or "Image classification" are **too vague**.

There is no limit to your creativity as long as the project is feasible and ethical. The more creative & specific your project proposal, the more interesting it will be, and the more likely will you find motivated team members to work on your project!

To get an idea of how to formulate your project proposals, you can browse through existing project proposals on the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22).

## How to submit a project proposal

First, you should make sure that you are [logged in](https://huggingface.co/login?sso=bm9uY2U9OTRlNjZjZmZhYjMwMmJmMWMyYjc5MmFiMTMyMzY5ODYmcmV0dXJuX3Nzb191cmw9aHR0cHMlM0ElMkYlMkZkaXNjdXNzLmh1Z2dpbmdmYWNlLmNvJTJGc2Vzc2lvbiUyRnNzb19sb2dpbg%3D%3D&sig=429ad8924bcb33c40f9823027ea749abb55d393f4f58924f36a2dba3ab0a48da) with your Hugging Face account on the forum.

Second, make sure that your project idea doesn't already exist by checking [existing projects](https://discuss.huggingface.co/c/flax-jax-projects/22). If your project already exists - great! This means that you can comment and improve the existing idea and join the project to form a team! If your project idea already exists for a different language, feel free to submit the same project idea, just in a different language.

Third, having ensured that your project doesn't exist, click on the *"New Topic"* button on the [Flax/JAX Projects Forum category](https://discuss.huggingface.co/c/flax-jax-projects/22) to create a new project proposal.

Fourth, make sure that your project proposal includes the following information:

1. *A clear description of the project*
2. *In which language should the project be conducted?* English, German, Chinese, ...? It can also be a multi-lingual project
3. *Which model should be used?* If you want to adapt an existing model, you can add the link to one of the 4000 available checkpoints in JAX [here](https://huggingface.co/models?filter=jax). If you want to train a model from scratch, you can simply state the model architecture to be used, *e.g.* BERT, CLIP, etc. You can also base your project on a model that is not part of transformers. For an overview of libraries based on JAX, you can take a look at [awesome-jax](https://github.com/n2cholas/awesome-jax#awesome-jax-). **Note** that for a project that is not based on Transformers it will be more difficult for the 🤗 team to help you. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what model architectures are currently supported in 🤗 Transformers.
4. *What data should be used?* It is important to state at least what kind of data you would like to use. Ideally, you can already point to publicly available data or a dataset in the 🤗 Datasets library.
5. *Are similar training scripts available in Flax/JAX?* It would be important to find similar training scripts that already exist in Flax/JAX. *E.g.* if you are working on a Seq-to-Seq task, you can make use of the [`run_summarization_flax.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py) script which is very similar to any seq2seq training. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what training scripts are currently supported in 🤗 Transformers.
6. *(Optionally) What are possible challenges?* List possible difficulties with your project. *E.g.* If you know that training convergence usually takes a lot of time, it is worth stating this here!
7. *(Optionally) What is the desired project outcome?* - How would you like to demo your project? One could *e.g.* create a Streamlit application.
8. *(Optionally) Links to read upon* - Can you provide any links that would help the reader to better understand your project idea?

Feel free to copy-paste the following format for your project proposal and fill out the respective sections:

```
# <FILL ME: Name of project>

<FILL ME: A clear description of the project>

## 2. Language

The model will be trained in <FILL ME: which language?>.

## 3. Model

<FILL ME: 3. Which model should be used?>

## 4. Datasets

<FILL ME: 4. Which data should be used?>

Possible links to publicly available datasets include:
- <FILL ME: Link 1 to dataset>
- <FILL ME: Link 2 to dataset>
- <FILL ME: Link 3 to dataset>

## 5. Training scripts

<FILL ME: 5. Are there publicly available training scripts that can be used/tweaked for the project?>

We can make use of <FILL ME: link to training script> to train the model.>

## 6. (Optional) Challenges

<(Optionally) FILL ME: 6. What are possible challenges?>

## 7. (Optional) Desired project outcome

<(Optionally) FILL ME: 7. What is the desired project outcome? A demo?>

## 8. (Optional) Reads

The following links can be useful to better understand the project and what has previously been done.

- <FILL ME: Link 1 to read>
- <FILL ME: Link 2 to read>
- <FILL ME: Link 3 to read>
```

To see how a proposed project looks like, please have a look at submitted project proposals [here](https://discuss.huggingface.co/c/flax-jax-projects/22).

## Will my project proposal be selected?

Having submitted a project proposal, you can now promote your idea in the Slack channel `#flax-jax-community-week` to try to convince other participants to join your project!

Once other people have joined your project, one of the organizers (`@Suzana, @valhalla, @osanseviero, @patrickvonplaten`) will officially create a team for your project and add your project to [this google sheet](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing).
transformers/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md/0
{ "file_path": "transformers/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md", "repo_id": "transformers", "token_count": 2070 }
296
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Model parallel language model training example The following example showcases how to train/fine-tune GPTNeo model with model parallelism using the JAX/Flax backend and the [`pjit`](https://jax.readthedocs.io/en/latest/jax.experimental.pjit.html) transformation. > Note: The example is experimental and might have bugs. Also currently it only supports single V3-8. The `partition.py` file defines the `PyTree` of `ParitionSpec` for the GPTNeo model which describes how the model will be sharded. The actual sharding is auto-matically handled by `pjit`. The weights are sharded across all local devices. To adapt the script for other models, we need to also change the `ParitionSpec` accordingly. TODO: Add more explantion. Before training, let's prepare our model first. To be able to shard the model, the sharded dimension needs to be a multiple of devices it'll be sharded on. But GPTNeo's vocab size is 50257, so we need to resize the embeddings accordingly. ```python from transformers import FlaxGPTNeoForCausalLM, GPTNeoConfig model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B") emb = jnp.zeros((50264, model.config.hidden_size)) # update the first 50257 weights using pre-trained weights emb = emb.at[:50257, :].set(model.params["transformer"]["wte"]["embedding"]) params = model.params params["transformer"]["wte"]["embedding"] = emb # initialize a random model with the right vocab_size config = GPTNeoConfig.from_pretrained("EleutherAI/gpt-neo-1.3B", vocab_size=50264) model = FlaxGPTNeoForCausalLM(config) # assign the pre-trained weights and save the model. model.params = params model.save_pretrained("gpt-neo-1.3B") ``` ### Train Model ```bash python run_clm_mp.py \ --model_name_or_path gpt-neo-1.3B \ --tokenizer_name openai-community/gpt2 \ --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --do_eval \ --block_size 1024 \ --num_train_epochs 5 \ --learning_rate 4e-6 \ --per_device_train_batch_size 3 --per_device_eval_batch_size 3 \ --overwrite_output_dir --output_dir ~/tmp/flax-clm \ --cache_dir ~/datasets_cache/wikitext --dtype bfloat16 \ --logging_steps 96 --eval_steps 96 ```
transformers/examples/research_projects/jax-projects/model_parallel/README.md/0
{ "file_path": "transformers/examples/research_projects/jax-projects/model_parallel/README.md", "repo_id": "transformers", "token_count": 918 }
297
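The README above leaves the concrete sharding rules to `partition.py`. For readers unfamiliar with `pjit`, a toy `PartitionSpec` PyTree might look like the following. The import path and the `"mp"` mesh-axis name are assumptions (they vary across JAX versions and scripts), so treat this purely as a sketch of the idea rather than the project's actual partitioning:

```python
# Hedged sketch: a PartitionSpec PyTree tells pjit how each weight's axes are split.
# Import path differs across JAX versions; older releases exposed it under jax.experimental.
from jax.sharding import PartitionSpec as P

# None = replicate this axis on every device, "mp" = shard it across the model-parallel mesh axis.
toy_param_spec = {
    "transformer": {
        "wte": {"embedding": P("mp", None)},  # split the (resized) vocab dimension
        "h": {
            "0": {
                "attn": {"attention": {"q_proj": {"kernel": P(None, "mp")}}},
                "mlp": {"c_fc": {"kernel": P(None, "mp")}, "c_proj": {"kernel": P("mp", None)}},
            }
        },
    },
}
```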
<jupyter_start><jupyter_code># %pip install-r requirements.txt from IPython.display import clear_output, Image, display import PIL.Image import io import json import torch import numpy as np from processing_image import Preprocess from visualizing_image import SingleImageViz from modeling_frcnn import GeneralizedRCNN from utils import Config import utils from transformers import LxmertForQuestionAnswering, LxmertTokenizer import wget import pickle import os # URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg", URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg" OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt" ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt" GQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/gqa/trainval_label2ans.json" VQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_label2ans.json" # for visualizing output def showarray(a, fmt="jpeg"): a = np.uint8(np.clip(a, 0, 255)) f = io.BytesIO() PIL.Image.fromarray(a).save(f, fmt) display(Image(data=f.getvalue())) # load object, attribute, and answer labels objids = utils.get_data(OBJ_URL) attrids = utils.get_data(ATTR_URL) gqa_answers = utils.get_data(GQA_URL) vqa_answers = utils.get_data(VQA_URL) # load models and model components frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned") frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg) image_preprocess = Preprocess(frcnn_cfg) lxmert_tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased") lxmert_gqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-gqa-uncased") lxmert_vqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-vqa-uncased") # image viz frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids) # run frcnn images, sizes, scales_yx = image_preprocess(URL) output_dict = frcnn( images, sizes, scales_yx=scales_yx, padding="max_detections", max_detections=frcnn_cfg.max_detections, return_tensors="pt", ) # add boxes and labels to the image frcnn_visualizer.draw_boxes( output_dict.get("boxes"), output_dict.pop("obj_ids"), output_dict.pop("obj_probs"), output_dict.pop("attr_ids"), output_dict.pop("attr_probs"), ) showarray(frcnn_visualizer._get_buffer()) test_questions_for_url1 = [ "Where is this scene?", "what is the man riding?", "What is the man wearing?", "What is the color of the horse?", ] test_questions_for_url2 = [ "Where is the cat?", "What is near the disk?", "What is the color of the table?", "What is the color of the cat?", "What is the shape of the monitor?", ] # Very important that the boxes are normalized normalized_boxes = output_dict.get("normalized_boxes") features = output_dict.get("roi_features") for test_question in test_questions_for_url2: # run lxmert test_question = [test_question] inputs = lxmert_tokenizer( test_question, padding="max_length", max_length=20, truncation=True, return_token_type_ids=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) # run lxmert(s) output_gqa = lxmert_gqa( input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, visual_feats=features, visual_pos=normalized_boxes, token_type_ids=inputs.token_type_ids, output_attentions=False, ) output_vqa = lxmert_vqa( input_ids=inputs.input_ids, 
attention_mask=inputs.attention_mask, visual_feats=features, visual_pos=normalized_boxes, token_type_ids=inputs.token_type_ids, output_attentions=False, ) # get prediction pred_vqa = output_vqa["question_answering_score"].argmax(-1) pred_gqa = output_gqa["question_answering_score"].argmax(-1) print("Question:", test_question) print("prediction from LXMERT GQA:", gqa_answers[pred_gqa]) print("prediction from LXMERT VQA:", vqa_answers[pred_vqa])<jupyter_output>Question: ['Where is the cat?'] prediction from LXMERT GQA: desk prediction from LXMERT VQA: desk Question: ['What is near the disk?'] prediction from LXMERT GQA: can prediction from LXMERT VQA: cat Question: ['What is the color of the table?'] prediction from LXMERT GQA: brown prediction from LXMERT VQA: brown Question: ['What is the color of the cat?'] prediction from LXMERT GQA: black prediction from LXMERT VQA: black and white Question: ['What is the shape of the monitor?'] prediction from LXMERT GQA: square prediction from LXMERT VQA: rectangle
transformers/examples/research_projects/lxmert/demo.ipynb/0
{ "file_path": "transformers/examples/research_projects/lxmert/demo.ipynb", "repo_id": "transformers", "token_count": 1973 }
298
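The notebook above stresses that the boxes passed to LXMERT as `visual_pos` must be normalized. The Faster R-CNN preprocessing already returns `normalized_boxes`, but the operation itself is simple to sketch; the shapes and example numbers below are illustrative assumptions, not values from the demo:

```python
# Hedged sketch: normalize (x1, y1, x2, y2) boxes to [0, 1] by image width/height,
# which is the coordinate range LXMERT expects for `visual_pos`.
import torch


def normalize_boxes(boxes: torch.Tensor, img_w: int, img_h: int) -> torch.Tensor:
    # boxes: (num_boxes, 4) in absolute pixel coordinates
    scale = torch.tensor([img_w, img_h, img_w, img_h], dtype=boxes.dtype)
    return boxes / scale


boxes = torch.tensor([[48.0, 20.0, 320.0, 240.0]])
print(normalize_boxes(boxes, img_w=640, img_h=480))  # tensor([[0.0750, 0.0417, 0.5000, 0.5000]])
```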
# Copyright 2020-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Once a model has been fine-pruned, the weights that are masked during the forward pass can be pruned once for all.
For instance, once the a model from the :class:`~emmental.MaskedBertForSequenceClassification` is trained, it can be saved
(and then loaded) as a standard :class:`~transformers.BertForSequenceClassification`.
"""

import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()

    main(args)
transformers/examples/research_projects/movement-pruning/bertarize.py/0
{ "file_path": "transformers/examples/research_projects/movement-pruning/bertarize.py", "repo_id": "transformers", "token_count": 2329 }
299
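`bertarize.py` relies on the binarizers from `emmental.modules` to turn learned scores into 0/1 masks before the masked weights are zeroed out for good. As a rough, self-contained illustration of the `topK` case (keep the weights whose scores fall in the top `threshold` fraction), something like the following captures the idea; it is a hedged sketch, not the repository's `TopKBinarizer`:

```python
# Hedged sketch of top-K binarization: keep the top `threshold` fraction of scores.
import torch


def topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    # threshold is the remaining-weight fraction, e.g. 0.5 keeps half the entries.
    k = max(1, int(threshold * scores.numel()))
    kth_value = scores.flatten().topk(k).values.min()
    return (scores >= kth_value).to(scores.dtype)


scores = torch.tensor([[0.9, -0.2], [0.1, 0.5]])
mask = topk_mask(scores, threshold=0.5)  # keeps the 2 highest-scoring entries
pruned = torch.randn(2, 2) * mask        # masked weights are zeroed, as in bertarize.py
print(mask)                              # tensor([[1., 0.], [0., 1.]])
```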