Dataset columns:
- text (string, 7 to 3.71M characters)
- id (string, 12 to 166 characters)
- metadata (dict)
- __index_level_0__ (int64, 0 to 658)
// // StatusView.swift // Diffusion-macOS // // Created by Cyril Zakka on 1/12/23. // See LICENSE at https://github.com/huggingface/swift-coreml-diffusers/LICENSE // import SwiftUI struct StatusView: View { @EnvironmentObject var generation: GenerationContext var pipelineState: Binding<PipelineState> @State private var showErrorPopover = false func submit() { if case .running = generation.state { return } Task { generation.state = .running(nil) do { let result = try await generation.generate() if result.userCanceled { generation.state = .userCanceled } else { generation.state = .complete(generation.positivePrompt, result.image, result.lastSeed, result.interval) } } catch { generation.state = .failed(error) } } } func errorWithDetails(_ message: String, error: Error) -> any View { HStack { Text(message) Spacer() Button { showErrorPopover.toggle() } label: { Image(systemName: "info.circle") }.buttonStyle(.plain) .popover(isPresented: $showErrorPopover) { VStack { Text(verbatim: "\(error)") .lineLimit(nil) .padding(.all, 5) Button { showErrorPopover.toggle() } label: { Text("Dismiss").frame(maxWidth: 200) } .padding(.bottom) } .frame(minWidth: 400, idealWidth: 400, maxWidth: 400) .fixedSize() } } } func generationStatusView() -> any View { switch generation.state { case .startup: return EmptyView() case .running(let progress): guard let progress = progress, progress.stepCount > 0 else { // The first time it takes a little bit before generation starts return HStack { Text("Preparing model…") Spacer() } } let step = Int(progress.step) + 1 let fraction = Double(step) / Double(progress.stepCount) return HStack { Text("Generating \(Int(round(100*fraction)))%") Spacer() } case .complete(_, let image, let lastSeed, let interval): guard let _ = image else { return HStack { Text("Safety checker triggered, please try a different prompt or seed.") Spacer() } } return HStack { let intervalString = String(format: "Time: %.1fs", interval ?? 0) Text(intervalString) Spacer() if generation.seed != lastSeed { Text(String("Seed: \(formatLargeNumber(lastSeed))")) Button("Set") { generation.seed = lastSeed } } }.frame(maxHeight: 25) case .failed(let error): return errorWithDetails("Generation error", error: error) case .userCanceled: return HStack { Text("Generation canceled.") Spacer() } } } var body: some View { switch pipelineState.wrappedValue { case .downloading(let progress): ProgressView("Downloading…", value: progress*100, total: 110).padding() case .uncompressing: ProgressView("Uncompressing…", value: 100, total: 110).padding() case .loading: ProgressView("Loading…", value: 105, total: 110).padding() case .ready: VStack { Button { submit() } label: { Text("Generate") .frame(maxWidth: .infinity) .frame(height: 50) } .buttonStyle(.borderedProminent) AnyView(generationStatusView()) } case .failed(let error): AnyView(errorWithDetails("Pipeline loading error", error: error)) } } } struct StatusView_Previews: PreviewProvider { static var previews: some View { StatusView(pipelineState: .constant(.downloading(0.2))) } }
swift-coreml-diffusers/Diffusion-macOS/StatusView.swift/0
{ "file_path": "swift-coreml-diffusers/Diffusion-macOS/StatusView.swift", "repo_id": "swift-coreml-diffusers", "token_count": 2493 }
390
// // TextToImage.swift // Diffusion // // Created by Pedro Cuenca on December 2022. // See LICENSE at https://github.com/huggingface/swift-coreml-diffusers/LICENSE // import SwiftUI import Combine import StableDiffusion /// Presents "Share" + "Save" buttons on Mac; just "Share" on iOS/iPadOS. /// This is because I didn't find a way for "Share" to show a Save option when running on macOS. struct ShareButtons: View { var image: CGImage var name: String var filename: String { name.replacingOccurrences(of: " ", with: "_") } var body: some View { let imageView = Image(image, scale: 1, label: Text(name)) if runningOnMac { HStack { ShareLink(item: imageView, preview: SharePreview(name, image: imageView)) Button() { guard let imageData = UIImage(cgImage: image).pngData() else { return } do { let fileURL = FileManager.default.temporaryDirectory.appendingPathComponent("\(filename).png") try imageData.write(to: fileURL) let controller = UIDocumentPickerViewController(forExporting: [fileURL]) let scene = UIApplication.shared.connectedScenes.first as! UIWindowScene scene.windows.first!.rootViewController!.present(controller, animated: true) } catch { print("Error creating file") } } label: { Label("Save…", systemImage: "square.and.arrow.down") } } } else { ShareLink(item: imageView, preview: SharePreview(name, image: imageView)) } } } struct ImageWithPlaceholder: View { @EnvironmentObject var generation: GenerationContext var state: Binding<GenerationState> var body: some View { switch state.wrappedValue { case .startup: return AnyView(Image("placeholder").resizable()) case .running(let progress): guard let progress = progress, progress.stepCount > 0 else { // The first time it takes a little bit before generation starts return AnyView(ProgressView()) } let step = Int(progress.step) + 1 let fraction = Double(step) / Double(progress.stepCount) let label = "Step \(step) of \(progress.stepCount)" return AnyView(VStack { Group { if let safeImage = generation.previewImage { Image(safeImage, scale: 1, label: Text("generated")) .resizable() .clipShape(RoundedRectangle(cornerRadius: 20)) } } ProgressView(label, value: fraction, total: 1).padding() }) case .complete(let lastPrompt, let image, _, let interval): guard let theImage = image else { return AnyView(Image(systemName: "exclamationmark.triangle").resizable()) } let imageView = Image(theImage, scale: 1, label: Text("generated")) return AnyView( VStack { imageView.resizable().clipShape(RoundedRectangle(cornerRadius: 20)) HStack { let intervalString = String(format: "Time: %.1fs", interval ?? 
0) Rectangle().fill(.clear).overlay(Text(intervalString).frame(maxWidth: .infinity, alignment: .leading).padding(.leading)) Rectangle().fill(.clear).overlay( HStack { Spacer() ShareButtons(image: theImage, name: lastPrompt).padding(.trailing) } ) }.frame(maxHeight: 25) }) case .failed(_): return AnyView(Image(systemName: "exclamationmark.triangle").resizable()) case .userCanceled: return AnyView(Text("Generation canceled")) } } } struct TextToImage: View { @EnvironmentObject var generation: GenerationContext func submit() { if case .running = generation.state { return } Task { generation.state = .running(nil) do { let result = try await generation.generate() generation.state = .complete(generation.positivePrompt, result.image, result.lastSeed, result.interval) } catch { generation.state = .failed(error) } } } var body: some View { VStack { HStack { PromptTextField(text: $generation.positivePrompt, isPositivePrompt: true, model: iosModel().modelVersion) Button("Generate") { submit() } .padding() .buttonStyle(.borderedProminent) } ImageWithPlaceholder(state: $generation.state) .scaledToFit() Spacer() } .padding() .environmentObject(generation) } }
swift-coreml-diffusers/Diffusion/Views/TextToImage.swift/0
{ "file_path": "swift-coreml-diffusers/Diffusion/Views/TextToImage.swift", "repo_id": "swift-coreml-diffusers", "token_count": 2669 }
391
<div align="center"> # Text Embeddings Inference <a href="https://github.com/huggingface/text-embeddings-inference"> <img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-embeddings-inference?style=social"> </a> <a href="https://huggingface.github.io/text-embeddings-inference"> <img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational"> </a> A blazing fast inference solution for text embeddings models. Benchmark for [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) on an Nvidia A10 with a sequence length of 512 tokens: <p> <img src="assets/bs1-lat.png" width="400" /> <img src="assets/bs1-tp.png" width="400" /> </p> <p> <img src="assets/bs32-lat.png" width="400" /> <img src="assets/bs32-tp.png" width="400" /> </p> </div> ## Table of contents - [Get Started](#get-started) - [Supported Models](#supported-models) - [Docker](#docker) - [Docker Images](#docker-images) - [API Documentation](#api-documentation) - [Using a private or gated model](#using-a-private-or-gated-model) - [Using Re-rankers models](#using-re-rankers-models) - [Using Sequence Classification models](#using-sequence-classification-models) - [Using SPLADE pooling](#using-splade-pooling) - [Distributed Tracing](#distributed-tracing) - [gRPC](#grpc) - [Local Install](#local-install) - [Docker Build](#docker-build) - [Apple M1/M2 Arm](#apple-m1m2-arm64-architectures) - [Examples](#examples) Text Embeddings Inference (TEI) is a toolkit for deploying and serving open source text embeddings and sequence classification models. TEI enables high-performance extraction for the most popular models, including FlagEmbedding, Ember, GTE and E5. TEI implements many features such as: * No model graph compilation step * Metal support for local execution on Macs * Small docker images and fast boot times. Get ready for true serverless! * Token based dynamic batching * Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention), [Candle](https://github.com/huggingface/candle) and [cuBLASLt](https://docs.nvidia.com/cuda/cublas/#using-the-cublaslt-api) * [Safetensors](https://github.com/huggingface/safetensors) weight loading * Production ready (distributed tracing with Open Telemetry, Prometheus metrics) ## Get Started ### Supported Models #### Text Embeddings You can use any JinaBERT model with Alibi or absolute positions or any BERT, CamemBERT, RoBERTa, or XLM-RoBERTa model with absolute positions in `text-embeddings-inference`. **Support for other model types will be added in the future.** Examples of supported models: | MTEB Rank | Model Type | Model ID | |-----------|-------------|--------------------------------------------------------------------------------------------------| | 6 | Bert | [WhereIsAI/UAE-Large-V1](https://hf.co/WhereIsAI/UAE-Large-V1) | | 10 | XLM-RoBERTa | [intfloat/multilingual-e5-large-instruct](https://hf.co/intfloat/multilingual-e5-large-instruct) | | N/A | NomicBert | [nomic-ai/nomic-embed-text-v1](https://hf.co/nomic-ai/nomic-embed-text-v1) | | N/A | NomicBert | [nomic-ai/nomic-embed-text-v1.5](https://hf.co/nomic-ai/nomic-embed-text-v1.5) | | N/A | JinaBERT | [jinaai/jina-embeddings-v2-base-en](https://hf.co/jinaai/jina-embeddings-v2-base-en) | You can explore the list of best performing text embeddings models [here](https://huggingface.co/spaces/mteb/leaderboard). 
#### Sequence Classification and Re-Ranking `text-embeddings-inference` v0.4.0 added support for Bert, CamemBERT, RoBERTa and XLM-RoBERTa Sequence Classification models. Example of supported sequence classification models: | Task | Model Type | Model ID | Revision | |--------------------|-------------|---------------------------------------------------------------------------------------------|-------------| | Re-Ranking | XLM-RoBERTa | [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | `refs/pr/4` | | Re-Ranking | XLM-RoBERTa | [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | `refs/pr/5` | | Sentiment Analysis | RoBERTa | [SamLowe/roberta-base-go_emotions](https://huggingface.co/SamLowe/roberta-base-go_emotions) | | ### Docker ```shell model=BAAI/bge-large-en-v1.5 revision=refs/pr/5 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id $model --revision $revision ``` And then you can make requests like ```bash curl 127.0.0.1:8080/embed \ -X POST \ -d '{"inputs":"What is Deep Learning?"}' \ -H 'Content-Type: application/json' ``` **Note:** To use GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). NVIDIA drivers on your machine need to be compatible with CUDA version 12.2 or higher. To see all options to serve your models: ```shell text-embeddings-router --help ``` ``` Usage: text-embeddings-router [OPTIONS] Options: --model-id <MODEL_ID> The name of the model to load. Can be a MODEL_ID as listed on <https://hf.co/models> like `thenlper/gte-base`. Or it can be a local directory containing the necessary files as saved by `save_pretrained(...)` methods of transformers [env: MODEL_ID=] [default: thenlper/gte-base] --revision <REVISION> The actual revision of the model if you're referring to a model on the hub. You can use a specific commit id or a branch like `refs/pr/2` [env: REVISION=] --tokenization-workers <TOKENIZATION_WORKERS> Optionally control the number of tokenizer workers used for payload tokenization, validation and truncation. Default to the number of CPU cores on the machine [env: TOKENIZATION_WORKERS=] --dtype <DTYPE> The dtype to be forced upon the model [env: DTYPE=] [possible values: float16, float32] --pooling <POOLING> Optionally control the pooling method for embedding models. If `pooling` is not set, the pooling configuration will be parsed from the model `1_Pooling/config.json` configuration. If `pooling` is set, it will override the model pooling configuration [env: POOLING=] Possible values: - cls: Select the CLS token as embedding - mean: Apply Mean pooling to the model embeddings - splade: Apply SPLADE (Sparse Lexical and Expansion) to the model embeddings. This option is only available if the loaded model is a `ForMaskedLM` Transformer model --max-concurrent-requests <MAX_CONCURRENT_REQUESTS> The maximum amount of concurrent requests for this particular deployment. Having a low limit will refuse clients requests instead of having them wait for too long and is usually good to handle backpressure correctly [env: MAX_CONCURRENT_REQUESTS=] [default: 512] --max-batch-tokens <MAX_BATCH_TOKENS> **IMPORTANT** This is one critical control to allow maximum usage of the available hardware. This represents the total amount of potential tokens within a batch. 
For `max_batch_tokens=1000`, you could fit `10` queries of `total_tokens=100` or a single query of `1000` tokens. Overall this number should be the largest possible until the model is compute bound. Since the actual memory overhead depends on the model implementation, text-embeddings-inference cannot infer this number automatically. [env: MAX_BATCH_TOKENS=] [default: 16384] --max-batch-requests <MAX_BATCH_REQUESTS> Optionally control the maximum number of individual requests in a batch [env: MAX_BATCH_REQUESTS=] --max-client-batch-size <MAX_CLIENT_BATCH_SIZE> Control the maximum number of inputs that a client can send in a single request [env: MAX_CLIENT_BATCH_SIZE=] [default: 32] --hf-api-token <HF_API_TOKEN> Your HuggingFace hub token [env: HF_API_TOKEN=] --hostname <HOSTNAME> The IP address to listen on [env: HOSTNAME=] [default: 0.0.0.0] -p, --port <PORT> The port to listen on [env: PORT=] [default: 3000] --uds-path <UDS_PATH> The name of the unix socket some text-embeddings-inference backends will use as they communicate internally with gRPC [env: UDS_PATH=] [default: /tmp/text-embeddings-inference-server] --huggingface-hub-cache <HUGGINGFACE_HUB_CACHE> The location of the huggingface hub cache. Used to override the location if you want to provide a mounted disk for instance [env: HUGGINGFACE_HUB_CACHE=/data] --payload-limit <PAYLOAD_LIMIT> Payload size limit in bytes Default is 2MB [env: PAYLOAD_LIMIT=] [default: 2000000] --api-key <API_KEY> Set an api key for request authorization. By default the server responds to every request. With an api key set, the requests must have the Authorization header set with the api key as Bearer token. [env: API_KEY=] --json-output Outputs the logs in JSON format (useful for telemetry) [env: JSON_OUTPUT=] --otlp-endpoint <OTLP_ENDPOINT> The grpc endpoint for opentelemetry. Telemetry is sent to this endpoint as OTLP over gRPC. e.g. `http://localhost:4317` [env: OTLP_ENDPOINT=] --cors-allow-origin <CORS_ALLOW_ORIGIN> [env: CORS_ALLOW_ORIGIN=] ``` ### Docker Images Text Embeddings Inference ships with multiple Docker images that you can use to target a specific backend: | Architecture | Image | |-------------------------------------|-------------------------------------------------------------------------| | CPU | ghcr.io/huggingface/text-embeddings-inference:cpu-1.2 | | Volta | NOT SUPPORTED | | Turing (T4, RTX 2000 series, ...) | ghcr.io/huggingface/text-embeddings-inference:turing-1.2 (experimental) | | Ampere 80 (A100, A30) | ghcr.io/huggingface/text-embeddings-inference:1.2 | | Ampere 86 (A10, A40, ...) | ghcr.io/huggingface/text-embeddings-inference:86-1.2 | | Ada Lovelace (RTX 4000 series, ...) | ghcr.io/huggingface/text-embeddings-inference:89-1.2 | | Hopper (H100) | ghcr.io/huggingface/text-embeddings-inference:hopper-1.2 (experimental) | **Warning**: Flash Attention is turned off by default for the Turing image as it suffers from precision issues. You can turn Flash Attention v1 ON by using the `USE_FLASH_ATTENTION=True` environment variable. ### API documentation You can consult the OpenAPI documentation of the `text-embeddings-inference` REST API using the `/docs` route. The Swagger UI is also available at: [https://huggingface.github.io/text-embeddings-inference](https://huggingface.github.io/text-embeddings-inference). ### Using a private or gated model You have the option to utilize the `HUGGING_FACE_HUB_TOKEN` environment variable for configuring the token employed by `text-embeddings-inference`. 
This allows you to gain access to protected resources. For example: 1. Go to https://huggingface.co/settings/tokens 2. Copy your cli READ token 3. Export `HUGGING_FACE_HUB_TOKEN=<your cli READ token>` or with Docker: ```shell model=<your private model> volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run token=<your cli READ token> docker run --gpus all -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id $model ``` ### Using Re-rankers models `text-embeddings-inference` v0.4.0 added support for CamemBERT, RoBERTa and XLM-RoBERTa Sequence Classification models. Re-rankers models are Sequence Classification cross-encoders models with a single class that scores the similarity between a query and a text. See [this blogpost](https://blog.llamaindex.ai/boosting-rag-picking-the-best-embedding-reranker-models-42d079022e83) by the LlamaIndex team to understand how you can use re-rankers models in your RAG pipeline to improve downstream performance. ```shell model=BAAI/bge-reranker-large revision=refs/pr/4 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id $model --revision $revision ``` And then you can rank the similarity between a query and a list of texts with: ```bash curl 127.0.0.1:8080/rerank \ -X POST \ -d '{"query":"What is Deep Learning?", "texts": ["Deep Learning is not...", "Deep learning is..."]}' \ -H 'Content-Type: application/json' ``` ### Using Sequence Classification models You can also use classic Sequence Classification models like `SamLowe/roberta-base-go_emotions`: ```shell model=SamLowe/roberta-base-go_emotions volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id $model ``` Once you have deployed the model you can use the `predict` endpoint to get the emotions most associated with an input: ```bash curl 127.0.0.1:8080/predict \ -X POST \ -d '{"inputs":"I like you."}' \ -H 'Content-Type: application/json' ``` ### Using SPLADE pooling You can choose to activate SPLADE pooling for Bert and Distilbert MaskedLM architectures: ```shell model=naver/efficient-splade-VI-BT-large-query volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id $model --pooling splade ``` Once you have deployed the model you can use the `/embed_sparse` endpoint to get the sparse embedding: ```bash curl 127.0.0.1:8080/embed_sparse \ -X POST \ -d '{"inputs":"I like you."}' \ -H 'Content-Type: application/json' ``` ### Distributed Tracing `text-embeddings-inference` is instrumented with distributed tracing using OpenTelemetry. You can use this feature by setting the address to an OTLP collector with the `--otlp-endpoint` argument. ### gRPC `text-embeddings-inference` offers a gRPC API as an alternative to the default HTTP API for high performance deployments. The API protobuf definition can be found [here](https://github.com/huggingface/text-embeddings-inference/blob/main/proto/tei.proto). You can use the gRPC API by adding the `-grpc` tag to any TEI Docker image. 
For example: ```shell model=BAAI/bge-large-en-v1.5 revision=refs/pr/5 volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.2-grpc --model-id $model --revision $revision ``` ```shell grpcurl -d '{"inputs": "What is Deep Learning"}' -plaintext 0.0.0.0:8080 tei.v1.Embed/Embed ``` ## Local install ### CPU You can also opt to install `text-embeddings-inference` locally. First [install Rust](https://rustup.rs/): ```shell curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh ``` Then run: ```shell # On x86 cargo install --path router -F mkl # On M1 or M2 cargo install --path router -F metal ``` You can now launch Text Embeddings Inference on CPU with: ```shell model=BAAI/bge-large-en-v1.5 revision=refs/pr/5 text-embeddings-router --model-id $model --revision $revision --port 8080 ``` **Note:** on some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run: ```shell sudo apt-get install libssl-dev gcc -y ``` ### Cuda GPUs with Cuda compute capabilities < 7.5 are not supported (V100, Titan V, GTX 1000 series, ...). Make sure you have Cuda and the nvidia drivers installed. NVIDIA drivers on your device need to be compatible with CUDA version 12.2 or higher. You also need to add the nvidia binaries to your path: ```shell export PATH=$PATH:/usr/local/cuda/bin ``` Then run: ```shell # This can take a while as we need to compile a lot of cuda kernels # On Turing GPUs (T4, RTX 2000 series ... ) cargo install --path router -F candle-cuda-turing -F http --no-default-features # On Ampere and Hopper cargo install --path router -F candle-cuda -F http --no-default-features ``` You can now launch Text Embeddings Inference on GPU with: ```shell model=BAAI/bge-large-en-v1.5 revision=refs/pr/5 text-embeddings-router --model-id $model --revision $revision --port 8080 ``` ## Docker build You can build the CPU container with: ```shell docker build . ``` To build the Cuda containers, you need to know the compute cap of the GPU you will be using at runtime. Then you can build the container with: ```shell # Example for Turing (T4, RTX 2000 series, ...) runtime_compute_cap=75 # Example for A100 runtime_compute_cap=80 # Example for A10 runtime_compute_cap=86 # Example for Ada Lovelace (RTX 4000 series, ...) runtime_compute_cap=89 # Example for H100 runtime_compute_cap=90 docker build . -f Dockerfile-cuda --build-arg CUDA_COMPUTE_CAP=$runtime_compute_cap ``` ### Apple M1/M2 arm64 architectures #### DISCLAIMER As explained here [MPS-Ready, ARM64 Docker Image](https://github.com/pytorch/pytorch/issues/81224), Metal / MPS is not supported via Docker. As such inference will be CPU bound and most likely pretty slow when using this docker image on an M1/M2 ARM CPU. ``` docker build . -f Dockerfile-arm64 --platform=linux/arm64 ``` ## Examples - [Set up an Inference Endpoint with TEI](https://huggingface.co/learn/cookbook/automatic_embedding_tei_inference_endpoints) - [RAG containers with TEI](https://github.com/plaggy/rag-containers)
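As a complement to the `curl` calls above, here is a minimal Python client sketch for the `/embed` and `/rerank` routes shown earlier. This is an illustrative sketch, not part of the project: it assumes a TEI server is already running locally on `127.0.0.1:8080` (as in the Docker examples), that the third-party `requests` package is installed, and that the response shapes match the API documentation.

```python
import requests

TEI_URL = "http://127.0.0.1:8080"  # assumed local TEI server, as in the Docker examples above

# Embedding request: mirrors `curl 127.0.0.1:8080/embed`
resp = requests.post(f"{TEI_URL}/embed", json={"inputs": "What is Deep Learning?"})
resp.raise_for_status()
embedding = resp.json()[0]  # one embedding vector per input
print(len(embedding))

# Re-ranking request: mirrors `curl 127.0.0.1:8080/rerank`
# (only valid when a re-ranker model such as BAAI/bge-reranker-large is deployed)
resp = requests.post(
    f"{TEI_URL}/rerank",
    json={
        "query": "What is Deep Learning?",
        "texts": ["Deep Learning is not...", "Deep learning is..."],
    },
)
resp.raise_for_status()
print(resp.json())  # expected: a list of index/score entries, per the API docs
```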
text-embeddings-inference/README.md/0
{ "file_path": "text-embeddings-inference/README.md", "repo_id": "text-embeddings-inference", "token_count": 7184 }
392
#[cfg(any(feature = "mkl", feature = "mkl-dynamic"))]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

mod bert;
mod distilbert;
mod jina;
mod nomic;

#[cfg(feature = "cuda")]
mod flash_bert;

#[cfg(feature = "cuda")]
mod flash_jina;

#[cfg(feature = "cuda")]
mod flash_nomic;

#[cfg(feature = "cuda")]
mod flash_distilbert;

pub use bert::{BertConfig, BertModel, PositionEmbeddingType};
use candle::{Result, Tensor};
pub use distilbert::{DistilBertConfig, DistilBertModel};
pub use jina::JinaBertModel;
pub use nomic::{NomicBertModel, NomicConfig};
use text_embeddings_backend_core::Batch;

#[cfg(feature = "cuda")]
pub use flash_bert::FlashBertModel;

#[cfg(feature = "cuda")]
pub use flash_jina::FlashJinaBertModel;

#[cfg(feature = "cuda")]
pub use flash_nomic::FlashNomicBertModel;

#[cfg(feature = "cuda")]
pub use flash_distilbert::FlashDistilBertModel;

pub(crate) trait Model {
    fn is_padded(&self) -> bool;

    fn embed(&self, _batch: Batch) -> Result<(Option<Tensor>, Option<Tensor>)> {
        candle::bail!("`embed` is not implemented for this model");
    }

    fn predict(&self, _batch: Batch) -> Result<Tensor> {
        candle::bail!("`predict` is not implemented for this model");
    }
}
text-embeddings-inference/backends/candle/src/models.rs/0
{ "file_path": "text-embeddings-inference/backends/candle/src/models.rs", "repo_id": "text-embeddings-inference", "token_count": 502 }
393
--- source: backends/candle/tests/test_bert.rs expression: embeddings_single --- - - -0.5905822 - -0.2525167 - 0.14959584 - 0.054548685 - -0.11789139 - -0.16702051 - -0.2331886 - -0.07111306 - -0.19944426 - -0.29595104 - -0.7731143 - 0.14407897 - -0.29080537 - -0.18528816 - -0.90925914 - -0.020204756 - 0.028502962 - 0.34817806 - -1.0032953 - -0.41697448 - 0.1934247 - 0.40048108 - -0.3906826 - -0.18889977 - 0.4526494 - 0.25689012 - 0.17726377 - -0.31426263 - 0.1250183 - -0.08393615 - 0.6500511 - -0.2302905 - 0.18419918 - 0.49622348 - -0.36559528 - 0.83769214 - -0.551407 - 0.103570856 - 0.09400676 - -0.21002762 - -0.29478115 - -0.18860105 - -0.009910335 - 0.18697806 - 0.92610204 - 0.33941612 - -0.082030125 - -0.6034355 - 0.15340401 - 0.21050398 - -0.20921578 - -0.13902524 - -0.3827591 - 0.63712025 - 0.0040485496 - 0.09897916 - 0.2789379 - -0.021972388 - -0.53686434 - 0.3202223 - 0.48562184 - -0.69386727 - 0.024683978 - 0.23594321 - 0.29521427 - -0.27045968 - 0.048694637 - 0.110244274 - 0.53926224 - -0.77139634 - 0.4209654 - 0.4750518 - -0.052538875 - 0.07966832 - 0.14345014 - -0.43797436 - 0.21794893 - 0.055310573 - 0.7462658 - -0.21850769 - 0.7266181 - 0.549555 - -0.11633466 - 0.368249 - 1.0122206 - -0.5168573 - 0.11747182 - 0.06994404 - -0.6778618 - -0.52381235 - -0.2710965 - -0.5250548 - -0.5948887 - -0.0012941743 - -0.2924962 - -0.095263354 - -0.051451437 - -0.9137996 - -0.40414217 - 0.3842635 - -0.0225345 - 0.062937684 - 0.41753647 - -0.047207624 - -0.068914846 - 0.090506375 - 0.42675138 - 0.109467186 - 0.31070897 - -0.62438476 - -0.19247803 - 0.2941215 - 0.18347324 - -0.18997712 - 0.1642261 - -0.19337931 - -0.102235556 - -0.041086674 - -0.14846203 - 0.6917226 - -0.8732191 - -0.060058128 - -0.24552263 - -0.12856898 - -0.2914501 - -0.1887479 - -0.3668236 - -0.000000000000000000000000000000029240537 - -0.0039896094 - -0.36697504 - -0.45366925 - 0.04975644 - 0.31989673 - -0.42694908 - -0.20267479 - 0.09384082 - -0.42025587 - 0.44595543 - -0.14169464 - 0.0813049 - -0.138726 - 1.1364049 - 0.3478461 - 0.09553712 - -0.64425725 - 0.5023483 - 0.83466536 - -0.5949079 - -0.060975213 - 0.31882063 - -0.17581889 - 0.022465851 - -0.05469214 - -0.10731493 - 0.15609773 - 0.16630125 - 0.21610661 - -0.028483214 - -0.5626438 - 0.07620309 - -0.16477105 - -0.069259115 - 0.3652963 - 0.042083118 - 0.018148389 - -0.062457535 - 0.30002403 - -0.26324186 - 0.3456572 - 0.062287714 - -0.06215567 - -0.43306065 - -0.20688285 - -0.1685666 - 0.2078495 - -0.71178585 - -0.6564981 - -0.38591677 - -0.052962523 - -0.3261991 - -0.18351372 - -0.4604339 - 0.2930664 - 0.277802 - 0.21312878 - 0.20633987 - 0.12381153 - 0.18202356 - 0.53390193 - 0.39961782 - -0.46880856 - 0.6647521 - -0.09672223 - 0.16156423 - 0.6650926 - 0.6806067 - 1.0482094 - -0.09435779 - 0.029582651 - 0.09683895 - 0.203073 - -0.5732891 - -0.07686127 - 0.08099214 - 0.15478149 - -0.26159474 - -0.2566363 - 0.78577054 - -0.47089925 - 0.30936465 - -0.25532243 - 0.0064516324 - -0.53040725 - 0.6080942 - -0.24619754 - -0.30199987 - 0.26108736 - 0.014690906 - -0.70210713 - -0.049562275 - -0.14834751 - -0.07956274 - -0.16515985 - 0.000000000000000000000000000000021861 - -0.25157446 - 0.43375129 - -0.8755244 - 0.7175831 - 0.2902798 - 0.0074201175 - -0.62435234 - 0.35527846 - -0.552679 - 0.18954086 - 0.20335351 - -0.035552207 - 0.4529572 - 0.10931892 - 0.19148143 - -0.16911459 - -0.35587427 - -0.3493578 - -0.47012717 - -0.26946193 - 0.23536286 - 0.58844364 - -0.3380215 - -0.24592063 - -0.15954623 - -0.22448906 - -0.25596026 - 0.12521902 - -0.106268756 - 0.28519255 - 
0.17178701 - -0.3287186 - -0.6783996 - 0.4482324 - 0.29271346 - 0.50640035 - 0.5931688 - -0.28495908 - -0.26254502 - -0.4237756 - 0.21733634 - -0.009652589 - 0.18802977 - 0.254598 - -0.31955296 - -0.05901348 - 0.0068638837 - -0.044022094 - 0.32722753 - -0.19676164 - 0.31067184 - 0.027737487 - 0.034569282 - -0.3569302 - -0.25845194 - -0.07841165 - -0.10891752 - 0.18727538 - 0.14427944 - 0.43520635 - -0.18098663 - -0.4947866 - -0.11612529 - -0.18040998 - -0.16923454 - 0.8050995 - -0.596841 - 0.6288896 - 0.03233897 - 0.14519283 - 0.80883557 - 0.10845174 - 0.34680286 - 0.5812995 - -0.5397416 - -0.5434021 - 0.057071857 - -0.09486484 - -0.33432207 - -0.18326911 - 0.39803055 - -0.80921376 - -0.10375113 - 0.65296334 - -0.09063423 - 0.10393066 - -0.24666296 - 0.5133282 - -0.35965517 - -0.02616671 - -0.24137843 - 0.10464253 - -0.6334521 - 0.0021675143 - -0.52741486 - -0.00000009196107 - -0.19908127 - 0.088920064 - 0.5567359 - -0.8009427 - 0.3083997 - -0.18737307 - 0.10197395 - 0.82060134 - 0.024411181 - 0.07724148 - 0.60279316 - 0.01429141 - -0.5294417 - -0.15347035 - 0.027200898 - 0.6112227 - 0.45954376 - -0.0004197508 - 0.41491097 - 0.02481507 - 0.8714842 - 0.32075447 - 0.120130405 - -0.055921085 - 0.22076562 - -0.6300306 - 0.36908314 - 0.68083143 - 0.10630507 - -0.4252642 - -0.20267203 - 0.53380257 - -0.12403769 - -0.16542706 - 0.019964132 - 0.8855693 - 0.30768922 - -0.28937826 - -0.05903546 - -0.4054046 - -0.4042628 - 1.1373893 - 0.27809668 - -0.06820501 - -0.4680714 - 0.089684784 - 0.31660247 - -0.054611325 - 0.14755571 - 0.11694164 - -0.16122253 - 0.51518285 - 0.6203127 - 0.64230794 - -0.036301676 - 0.11894374 - 0.017895553 - -0.5259501 - -0.16221896 - 0.73642933 - -0.079223275 - 0.54166216 - 0.28228587 - 0.27675694
text-embeddings-inference/backends/candle/tests/snapshots/test_bert__mini_single.snap/0
{ "file_path": "text-embeddings-inference/backends/candle/tests/snapshots/test_bert__mini_single.snap", "repo_id": "text-embeddings-inference", "token_count": 3466 }
394
--- source: backends/candle/tests/test_jina.rs expression: embeddings_batch --- - - -0.73668385 - -0.34012684 - -0.0715523 - 0.10852202 - 0.14204022 - -0.40989703 - 0.23236847 - 0.14997244 - 0.22580299 - -0.026758675 - 0.39548117 - -0.25018567 - -0.61580956 - -1.0374275 - 0.3620463 - -0.39094338 - 0.30281666 - -0.63823456 - -0.06514027 - -0.43619013 - -0.56252056 - -0.032389965 - 0.7796189 - -0.07337697 - 0.6910109 - -0.42153034 - -0.44665217 - -0.08850418 - 0.9865168 - 0.719132 - -0.26575097 - 0.348945 - 0.2953208 - 0.06414049 - -0.6311697 - 0.49613383 - 0.5473903 - 0.34628963 - -0.30335656 - -0.37656382 - -0.24137187 - 0.22599705 - 0.7282061 - 0.31632474 - -0.5788445 - -0.048453815 - -0.99214715 - 0.67147404 - 0.40648526 - -0.08697222 - -0.09709507 - 0.13716747 - -0.3302538 - 0.6385713 - -0.4333674 - 0.17140126 - -0.10378386 - 0.34555072 - -0.35002294 - 0.37138405 - 0.6942465 - -0.012582751 - -0.29724523 - 0.3630598 - -0.3450033 - -0.58791435 - -0.2560966 - -0.06847818 - -0.82814556 - 0.5919305 - -0.5577107 - -0.3965461 - 0.3292896 - -0.66765755 - 0.10509022 - -0.029757977 - 0.18136811 - 0.74675995 - -0.2849062 - -0.7784263 - -0.46817058 - -0.15993918 - -0.5867964 - 0.81102645 - 0.40201172 - -0.8813075 - 0.13769837 - 0.21844776 - 0.65046084 - -0.4814538 - 0.13929038 - 0.1781513 - -0.25574976 - -0.6823837 - -0.32714817 - -0.7217614 - 0.672464 - 0.27874002 - -0.8074308 - 0.7273515 - 0.6546895 - 0.3032117 - 0.3657865 - -0.19880594 - -0.07629948 - 0.4256604 - -1.078911 - -0.20131199 - -1.1999272 - -0.59212506 - -0.17580846 - -0.3750479 - -0.41822523 - -0.2924359 - -0.37015983 - -0.15523282 - 0.61704636 - -0.6305304 - 0.044137325 - 0.11221082 - 1.0062258 - -0.0049912357 - -0.47516233 - 0.56906956 - 0.3740537 - 0.8261896 - -0.32491022 - -0.005009859 - -0.23551618 - -0.039758164 - -0.12311789 - 0.71079177 - -0.36461496 - 0.6814915 - 0.8928003 - 0.07970085 - 0.30849716 - 0.2967249 - -0.2721908 - 0.75819844 - -0.29332206 - -0.5996169 - 0.56722975 - -0.379029 - -0.24413143 - 0.19070697 - 1.0095695 - -0.40070263 - -0.48254392 - 0.27729446 - -0.27582094 - 0.46122116 - -0.8984841 - -0.61792296 - -0.51835155 - 0.8659856 - -0.063467644 - 0.17027345 - 0.27787298 - -0.52405035 - -0.4709656 - 0.74265236 - -0.9647818 - 0.34670112 - -0.03367323 - -0.29435757 - 0.7839863 - 0.954704 - -0.074295476 - -0.11577297 - 0.34262037 - 0.60408914 - -0.15067175 - -0.56553507 - -0.45733213 - -0.2838439 - -0.562523 - 0.4098379 - -0.15422437 - -0.26893747 - 0.063611485 - -0.72149956 - -0.101162024 - 0.11956217 - 0.19708382 - -0.3307448 - -0.15375762 - -0.30214736 - -0.25265673 - -0.49330828 - -0.3747701 - 0.011120404 - 0.18189253 - -0.030484946 - -0.6194291 - 0.21167146 - 0.227614 - 0.16227365 - -0.08826481 - 0.49216056 - 0.2937846 - 0.8931769 - -0.59395313 - 0.74639904 - 0.00653029 - 0.34859207 - -0.6235556 - -0.4537472 - -0.63391864 - -0.1443559 - 0.07003089 - 0.037237544 - 0.9805306 - 0.2809817 - 0.073027 - 0.32501057 - -0.37974665 - -0.4622294 - 0.39487654 - 0.06744876 - 0.31691965 - -0.049856696 - 0.13301113 - 0.28951162 - 0.03810055 - -0.38808325 - 0.043499887 - 0.008641668 - -0.24204402 - -0.090396896 - 0.18011324 - 0.23784415 - -0.5009206 - 0.1553507 - 0.5209598 - 0.3395558 - -0.5927804 - -0.025065303 - 0.15375154 - 0.077146195 - 0.16486229 - 0.9559272 - 0.34631437 - -0.455141 - -0.39787167 - 0.29098955 - 0.5584746 - 0.059295062 - 0.1384459 - 0.42039558 - 0.010655655 - 0.021553522 - -0.4591532 - -0.69064206 - -0.0022638962 - -0.030271845 - 0.25798056 - 0.7935306 - -0.25783426 - -0.14395986 - 0.2626888 - 0.28115913 
- -0.34353155 - 1.0121855 - -0.2966 - 0.3429189 - -0.5701311 - -0.09280105 - 0.48118117 - 0.35712275 - -0.16092014 - 0.116320096 - -0.16216387 - -0.4784748 - -0.49545664 - 0.026475558 - 0.58664286 - -0.20623466 - 0.52622694 - -0.26142675 - -0.82888204 - -0.05105743 - -0.13397925 - -0.45493487 - 0.45166442 - 0.28790733 - 0.9281663 - 0.5534405 - 0.24245916 - 0.24755414 - 0.47102252 - -0.22941728 - -0.059087906 - -0.12679414 - -0.23186122 - 0.3594575 - 0.36945596 - 0.38604233 - 0.04756975 - 0.09457792 - 0.7681518 - 0.51292485 - -0.4647774 - -0.41134024 - -0.14788796 - -0.026648177 - 0.13731936 - 0.1844856 - -0.4147409 - -0.4478915 - -0.027359122 - -0.3959928 - -0.37414354 - -0.19002788 - 0.39353797 - 0.007390729 - 0.37485725 - -0.12709224 - -0.46179503 - -0.14690821 - -0.06336937 - 0.037377708 - 0.07596908 - -0.34052616 - 0.46046728 - 0.5605055 - -0.55663145 - -0.35354254 - 0.06384647 - -0.14710614 - -0.22530709 - 0.52121586 - -0.0495451 - -0.8934284 - -0.111877166 - 0.31237033 - -0.6044664 - -0.7920669 - 0.017268227 - -0.31899163 - 0.10419905 - -0.34517533 - -0.3948041 - 0.5101954 - -0.08465504 - 0.25743407 - 0.5437978 - -0.6793512 - -0.45921823 - 0.5326503 - -0.18141039 - -0.54093796 - 0.039407533 - 0.017359953 - -1.1775223 - 0.10029836 - 0.17366055 - -0.85059124 - -0.2780114 - 0.96832466 - -0.6595807 - -0.7359334 - 0.24694958 - 0.7053874 - 0.07484163 - -0.06028542 - -0.2428803 - 0.026168117 - 0.32321474 - 0.6359962 - 0.6015481 - -0.23883562 - -0.26510406 - -1.3345877 - -0.3764 - 0.07601651 - 0.27453107 - -0.3821561 - -0.23207092 - -0.09694352 - -0.4056513 - 0.6571864 - 0.13454896 - -0.4461467 - -0.30312943 - 0.1957128 - 0.015960306 - 0.40737882 - 0.36711743 - 0.07948569 - -0.3717199 - 0.16874814 - -0.31267354 - -0.23825826 - -0.0774443 - -0.38939747 - -0.14293732 - 0.90112275 - -0.72516924 - 0.984644 - 0.019674359 - 0.055611532 - -0.2934082 - 0.46005297 - -0.19833238 - -0.45715576 - -0.3313622 - -0.027743893 - 0.38861424 - 0.15984824 - 0.44063833 - 0.30860153 - 0.27471563 - -0.13229555 - -0.51096684 - -0.8010029 - -0.4840509 - -0.519855 - 0.21440867 - 0.06478418 - 0.6136018 - 0.05823111 - 0.02230786 - -0.31735128 - 0.45651013 - 0.05455484 - 0.03172924 - -0.15754183 - 0.57087106 - 0.302311 - 0.32235426 - -0.37271267 - -0.062497634 - 0.49394235 - -0.5000919 - 0.47894144 - -0.055776197 - -0.7185101 - 0.34229258 - -0.47906867 - 1.2254735 - -0.63414246 - 0.091453776 - 0.04621964 - 0.39976168 - 1.1370289 - 0.8316378 - -0.1304873 - -0.19904993 - 0.366474 - 0.40303475 - 0.19891736 - -0.2694296 - -0.007872573 - -0.21664867 - 0.07184189 - -0.30892542 - -0.51357216 - -0.5767418 - 0.6941703 - -0.054563563 - -0.5326759 - -0.47848055 - 0.40909168 - 0.11241865 - 0.43552828 - 0.08060895 - -0.3370398 - 0.17169443 - 0.21128477 - -0.65164536 - -1.0550821 - -0.3331693 - 0.5605589 - 0.8862934 - -0.022461575 - -0.13918051 - 0.85254467 - -0.26736432 - 0.40900162 - -0.54530114 - -0.109466515 - -0.5180425 - -1.1476005 - 0.017990198 - 0.22895983 - -0.48230448 - 0.32160607 - 0.71078336 - 0.48590115 - -0.64358234 - -0.75938046 - 0.105871856 - -0.4471452 - 0.071705274 - 0.46669033 - 0.29078048 - -0.109523356 - 0.56042516 - 0.11813028 - 0.46236435 - -0.7612805 - 0.08246316 - 0.561123 - -0.22669752 - 0.46435124 - -0.12556691 - -0.17807259 - -0.38081747 - 0.089175105 - 0.37251982 - 0.009899339 - - -0.6258575 - -0.54804754 - 0.13834608 - 0.3368268 - 0.118097365 - -0.44837838 - 0.38703012 - -0.03681364 - 0.16565804 - -0.17456621 - 0.46841913 - -0.16639332 - -0.6830714 - -0.9968887 - 0.74894214 - -0.49739882 - 0.17090584 - 
-0.51595217 - -0.025067393 - -0.43994397 - -0.5288875 - -0.010524412 - 0.68397003 - 0.00094923377 - 0.7182251 - -0.7732404 - -0.53631896 - -0.014902936 - 0.8814053 - 0.49463782 - -0.3484184 - 0.2203561 - 0.535584 - 0.053611778 - -0.6109779 - 0.34571558 - 0.55962723 - 0.42110342 - -0.3431571 - -0.42719498 - -0.1622013 - -0.063774794 - 0.7308566 - 0.37309992 - -0.54693377 - 0.10378383 - -0.82453156 - 0.56506014 - 0.60045683 - -0.012032587 - -0.17478134 - 0.24510865 - -0.5098945 - 0.9484012 - -0.29771543 - 0.044558473 - -0.2106627 - 0.31117538 - -0.60099584 - 0.18622015 - 0.693832 - 0.09960179 - -0.42590287 - 0.32845956 - -0.40717852 - -0.44070238 - 0.028581027 - -0.19334748 - -0.8829633 - 0.4422871 - -0.7548492 - -0.39266592 - 0.69387734 - -0.6330214 - 0.21701033 - 0.18680653 - 0.09905142 - 0.8045387 - -0.315876 - -0.7137362 - -0.5562205 - -0.1079926 - -0.4724413 - 0.96796775 - 0.29352766 - -0.81541336 - 0.36886823 - 0.4787166 - 0.52278715 - -0.48305058 - 0.13325576 - 0.4875254 - -0.22803363 - -0.7590358 - -0.14664412 - -0.879706 - 0.682916 - 0.14855725 - -1.0184903 - 0.5507337 - 0.569222 - 0.31514075 - 0.62274027 - -0.32432622 - -0.09131627 - 0.43202394 - -0.8701954 - -0.08341806 - -1.2153687 - -0.40948495 - -0.1702378 - -0.54740334 - -0.48194543 - -0.2423253 - -0.4687544 - -0.13046497 - 0.66072136 - -0.514049 - -0.048983276 - -0.1139099 - 1.1299253 - -0.06445108 - -0.26474863 - 0.672685 - 0.28219154 - 0.83556813 - -0.3372388 - -0.010138115 - -0.30575222 - 0.12501504 - -0.18299821 - 0.68838155 - -0.20157899 - 0.66440886 - 0.98802114 - 0.07312933 - 0.37954614 - 0.276943 - -0.2548735 - 0.9237343 - -0.3586478 - -0.65259564 - 0.67842615 - -0.47164488 - -0.20399868 - 0.09143736 - 0.84724796 - -0.47076935 - -0.24657816 - 0.157211 - -0.1467024 - 0.42150247 - -0.9810306 - -0.29296878 - -0.6768669 - 0.7368343 - -0.016836716 - 0.38332418 - 0.26103643 - -0.43472654 - -0.42027396 - 0.7256403 - -0.7409004 - 0.2097269 - -0.14758325 - -0.32247585 - 0.768879 - 1.0538769 - -0.13251239 - -0.15693328 - 0.30749947 - 0.66435075 - -0.35877824 - -0.63255584 - -0.7470684 - -0.39004886 - -0.717204 - 0.1962583 - -0.31756508 - -0.14924219 - -0.048519254 - -0.64350164 - -0.08571612 - 0.16110295 - 0.2818928 - -0.1855998 - -0.013100313 - -0.32920322 - -0.08167001 - -0.60771966 - -0.16713963 - -0.042582974 - 0.2164097 - -0.055456955 - -0.83404255 - 0.15130463 - 0.23197632 - 0.24511193 - -0.36819267 - 0.46589231 - 0.1815627 - 1.1352087 - -0.6556176 - 0.61636865 - -0.048470926 - 0.6031747 - -0.43357193 - -0.3559264 - -0.57808846 - -0.20370248 - 0.2139466 - 0.17453064 - 0.9311236 - 0.103386104 - 0.06571435 - 0.5040777 - -0.48878047 - -0.629567 - 0.5088647 - -0.009554142 - 0.31415233 - -0.13485786 - -0.06238198 - 0.2109054 - -0.07454253 - -0.5029938 - -0.03624041 - -0.15522996 - -0.44486445 - -0.12613204 - 0.3527791 - 0.01934108 - -0.6815807 - 0.15968806 - 0.40955836 - 0.37752843 - -0.60240865 - 0.06797554 - 0.13444227 - 0.20517793 - 0.06795175 - 0.72272956 - 0.40924573 - -0.3583372 - -0.24953146 - 0.3454433 - 0.73249006 - -0.016263098 - 0.38937235 - 0.477409 - -0.24734643 - -0.22065666 - -0.43827358 - -0.8564954 - -0.16533723 - 0.073783174 - 0.21324871 - 0.94114935 - -0.4757835 - -0.15708917 - 0.18403076 - 0.1925097 - -0.21202324 - 0.9842098 - -0.0878644 - 0.2447358 - -0.94101125 - 0.087468654 - 0.6926329 - 0.46830428 - -0.29974294 - -0.08031401 - 0.03146644 - -0.40275338 - -0.5026202 - 0.029807257 - 0.8209338 - -0.026684912 - 0.3648769 - -0.21028933 - -0.7029544 - 0.06057348 - 0.019458666 - -0.5838164 - 0.4917751 - 
0.3183142 - 0.84544075 - 0.44492447 - 0.21333058 - 0.30247962 - 0.4446727 - -0.4675721 - -0.060020737 - -0.3299496 - -0.11601128 - 0.35059682 - 0.3092138 - 0.35126644 - -0.31546658 - -0.0014095213 - 0.59492683 - 0.6572098 - -0.6723788 - -0.43647325 - 0.013674368 - -0.29085657 - 0.31547442 - 0.11456098 - -0.24219348 - -0.4402923 - -0.020581847 - -0.39323604 - -0.22420351 - -0.09342688 - 0.38753325 - -0.007150747 - 0.6456237 - -0.066635594 - -0.7063716 - -0.29063946 - -0.19057932 - 0.20498846 - 0.039399635 - -0.27865243 - 0.5537731 - 0.73458505 - -0.6050471 - -0.34599435 - -0.1824834 - -0.058190476 - -0.21754293 - 0.5583502 - -0.060159765 - -0.81757104 - -0.2916988 - 0.26248676 - -0.7621182 - -1.0135679 - -0.051746085 - -0.4234231 - 0.019949561 - -0.3188232 - -0.3598222 - 0.6737609 - 0.025792923 - 0.24157763 - 0.66929823 - -0.57916325 - -0.48869452 - 0.50248295 - -0.22694115 - -0.37696207 - -0.10308571 - -0.03803464 - -1.2336473 - 0.09218389 - -0.024684472 - -0.7819303 - -0.34423468 - 1.0444496 - -0.5913346 - -0.5812771 - 0.32361537 - 0.50330496 - 0.20229712 - -0.33572042 - -0.099175066 - -0.2046279 - 0.40374172 - 0.46500006 - 0.7372935 - -0.1699504 - -0.39697903 - -1.0821605 - -0.33508205 - 0.040522095 - 0.21879213 - -0.5729624 - -0.27783832 - -0.17614284 - -0.28024828 - 0.7211515 - -0.00059991144 - -0.45371056 - -0.28531542 - 0.14602807 - 0.04662592 - 0.391477 - 0.58497566 - 0.010007991 - -0.35190052 - 0.039734583 - -0.27348435 - -0.07543832 - 0.11173277 - -0.27350292 - -0.092721514 - 0.945073 - -0.77325374 - 0.7610948 - -0.13234852 - 0.12271928 - -0.09632763 - 0.08469461 - -0.23362634 - -0.5520911 - -0.5005254 - -0.093050085 - 0.44351986 - -0.05776892 - 0.53914416 - 0.37950468 - 0.3350498 - -0.04806131 - -0.20750779 - -0.8747888 - -0.50522244 - -0.26718557 - 0.15583205 - -0.14595877 - 0.62023103 - -0.06198295 - -0.14184205 - -0.3325085 - 0.43675852 - -0.023822654 - 0.0666159 - -0.41536772 - 0.59659994 - 0.34701145 - 0.43234462 - -0.16687086 - -0.2053485 - 0.61458313 - -0.5899166 - 0.34109336 - 0.11660128 - -0.97081697 - 0.24870658 - -0.19222844 - 1.2454556 - -0.7412164 - 0.040932313 - 0.09928498 - 0.43134254 - 1.3612144 - 0.9518776 - -0.14346507 - -0.124920174 - 0.27368224 - 0.2799998 - 0.30024117 - -0.27684867 - 0.054528676 - -0.23937213 - 0.01023684 - -0.47547257 - -0.6867144 - -0.3181229 - 0.52634346 - 0.024729064 - -0.47309282 - -0.3616146 - 0.17723028 - 0.018263185 - 0.36472362 - 0.422522 - -0.22185251 - 0.112825364 - 0.23129265 - -0.68728304 - -1.1391162 - -0.49052075 - 0.49895275 - 1.1125307 - 0.13734463 - -0.01637707 - 0.7318877 - -0.3134311 - 0.57362086 - -0.46828306 - -0.19481543 - -0.69564104 - -1.0398626 - -0.20918602 - -0.03195838 - -0.4123183 - 0.39416733 - 0.82587814 - 0.4731025 - -0.48742402 - -0.7621099 - 0.0016704369 - -0.49811795 - 0.18096921 - 0.5582192 - 0.47097775 - -0.20861416 - 0.5764632 - -0.12298558 - 0.49796546 - -0.7445811 - 0.10852885 - 0.6405106 - -0.08042282 - 0.5474277 - 0.029673172 - -0.17238168 - -0.3215491 - 0.19171812 - 0.459572 - 0.16634196 - - -0.73668385 - -0.34012684 - -0.0715523 - 0.10852202 - 0.14204022 - -0.40989703 - 0.23236847 - 0.14997244 - 0.22580299 - -0.026758675 - 0.39548117 - -0.25018567 - -0.61580956 - -1.0374275 - 0.3620463 - -0.39094338 - 0.30281666 - -0.63823456 - -0.06514027 - -0.43619013 - -0.56252056 - -0.032389965 - 0.7796189 - -0.07337697 - 0.6910109 - -0.42153034 - -0.44665217 - -0.08850418 - 0.9865168 - 0.719132 - -0.26575097 - 0.348945 - 0.2953208 - 0.06414049 - -0.6311697 - 0.49613383 - 0.5473903 - 0.34628963 - -0.30335656 - 
-0.37656382 - -0.24137187 - 0.22599705 - 0.7282061 - 0.31632474 - -0.5788445 - -0.048453815 - -0.99214715 - 0.67147404 - 0.40648526 - -0.08697222 - -0.09709507 - 0.13716747 - -0.3302538 - 0.6385713 - -0.4333674 - 0.17140126 - -0.10378386 - 0.34555072 - -0.35002294 - 0.37138405 - 0.6942465 - -0.012582751 - -0.29724523 - 0.3630598 - -0.3450033 - -0.58791435 - -0.2560966 - -0.06847818 - -0.82814556 - 0.5919305 - -0.5577107 - -0.3965461 - 0.3292896 - -0.66765755 - 0.10509022 - -0.029757977 - 0.18136811 - 0.74675995 - -0.2849062 - -0.7784263 - -0.46817058 - -0.15993918 - -0.5867964 - 0.81102645 - 0.40201172 - -0.8813075 - 0.13769837 - 0.21844776 - 0.65046084 - -0.4814538 - 0.13929038 - 0.1781513 - -0.25574976 - -0.6823837 - -0.32714817 - -0.7217614 - 0.672464 - 0.27874002 - -0.8074308 - 0.7273515 - 0.6546895 - 0.3032117 - 0.3657865 - -0.19880594 - -0.07629948 - 0.4256604 - -1.078911 - -0.20131199 - -1.1999272 - -0.59212506 - -0.17580846 - -0.3750479 - -0.41822523 - -0.2924359 - -0.37015983 - -0.15523282 - 0.61704636 - -0.6305304 - 0.044137325 - 0.11221082 - 1.0062258 - -0.0049912357 - -0.47516233 - 0.56906956 - 0.3740537 - 0.8261896 - -0.32491022 - -0.005009859 - -0.23551618 - -0.039758164 - -0.12311789 - 0.71079177 - -0.36461496 - 0.6814915 - 0.8928003 - 0.07970085 - 0.30849716 - 0.2967249 - -0.2721908 - 0.75819844 - -0.29332206 - -0.5996169 - 0.56722975 - -0.379029 - -0.24413143 - 0.19070697 - 1.0095695 - -0.40070263 - -0.48254392 - 0.27729446 - -0.27582094 - 0.46122116 - -0.8984841 - -0.61792296 - -0.51835155 - 0.8659856 - -0.063467644 - 0.17027345 - 0.27787298 - -0.52405035 - -0.4709656 - 0.74265236 - -0.9647818 - 0.34670112 - -0.03367323 - -0.29435757 - 0.7839863 - 0.954704 - -0.074295476 - -0.11577297 - 0.34262037 - 0.60408914 - -0.15067175 - -0.56553507 - -0.45733213 - -0.2838439 - -0.562523 - 0.4098379 - -0.15422437 - -0.26893747 - 0.063611485 - -0.72149956 - -0.101162024 - 0.11956217 - 0.19708382 - -0.3307448 - -0.15375762 - -0.30214736 - -0.25265673 - -0.49330828 - -0.3747701 - 0.011120404 - 0.18189253 - -0.030484946 - -0.6194291 - 0.21167146 - 0.227614 - 0.16227365 - -0.08826481 - 0.49216056 - 0.2937846 - 0.8931769 - -0.59395313 - 0.74639904 - 0.00653029 - 0.34859207 - -0.6235556 - -0.4537472 - -0.63391864 - -0.1443559 - 0.07003089 - 0.037237544 - 0.9805306 - 0.2809817 - 0.073027 - 0.32501057 - -0.37974665 - -0.4622294 - 0.39487654 - 0.06744876 - 0.31691965 - -0.049856696 - 0.13301113 - 0.28951162 - 0.03810055 - -0.38808325 - 0.043499887 - 0.008641668 - -0.24204402 - -0.090396896 - 0.18011324 - 0.23784415 - -0.5009206 - 0.1553507 - 0.5209598 - 0.3395558 - -0.5927804 - -0.025065303 - 0.15375154 - 0.077146195 - 0.16486229 - 0.9559272 - 0.34631437 - -0.455141 - -0.39787167 - 0.29098955 - 0.5584746 - 0.059295062 - 0.1384459 - 0.42039558 - 0.010655655 - 0.021553522 - -0.4591532 - -0.69064206 - -0.0022638962 - -0.030271845 - 0.25798056 - 0.7935306 - -0.25783426 - -0.14395986 - 0.2626888 - 0.28115913 - -0.34353155 - 1.0121855 - -0.2966 - 0.3429189 - -0.5701311 - -0.09280105 - 0.48118117 - 0.35712275 - -0.16092014 - 0.116320096 - -0.16216387 - -0.4784748 - -0.49545664 - 0.026475558 - 0.58664286 - -0.20623466 - 0.52622694 - -0.26142675 - -0.82888204 - -0.05105743 - -0.13397925 - -0.45493487 - 0.45166442 - 0.28790733 - 0.9281663 - 0.5534405 - 0.24245916 - 0.24755414 - 0.47102252 - -0.22941728 - -0.059087906 - -0.12679414 - -0.23186122 - 0.3594575 - 0.36945596 - 0.38604233 - 0.04756975 - 0.09457792 - 0.7681518 - 0.51292485 - -0.4647774 - -0.41134024 - -0.14788796 - -0.026648177 - 0.13731936 - 
0.1844856 - -0.4147409 - -0.4478915 - -0.027359122 - -0.3959928 - -0.37414354 - -0.19002788 - 0.39353797 - 0.007390729 - 0.37485725 - -0.12709224 - -0.46179503 - -0.14690821 - -0.06336937 - 0.037377708 - 0.07596908 - -0.34052616 - 0.46046728 - 0.5605055 - -0.55663145 - -0.35354254 - 0.06384647 - -0.14710614 - -0.22530709 - 0.52121586 - -0.0495451 - -0.8934284 - -0.111877166 - 0.31237033 - -0.6044664 - -0.7920669 - 0.017268227 - -0.31899163 - 0.10419905 - -0.34517533 - -0.3948041 - 0.5101954 - -0.08465504 - 0.25743407 - 0.5437978 - -0.6793512 - -0.45921823 - 0.5326503 - -0.18141039 - -0.54093796 - 0.039407533 - 0.017359953 - -1.1775223 - 0.10029836 - 0.17366055 - -0.85059124 - -0.2780114 - 0.96832466 - -0.6595807 - -0.7359334 - 0.24694958 - 0.7053874 - 0.07484163 - -0.06028542 - -0.2428803 - 0.026168117 - 0.32321474 - 0.6359962 - 0.6015481 - -0.23883562 - -0.26510406 - -1.3345877 - -0.3764 - 0.07601651 - 0.27453107 - -0.3821561 - -0.23207092 - -0.09694352 - -0.4056513 - 0.6571864 - 0.13454896 - -0.4461467 - -0.30312943 - 0.1957128 - 0.015960306 - 0.40737882 - 0.36711743 - 0.07948569 - -0.3717199 - 0.16874814 - -0.31267354 - -0.23825826 - -0.0774443 - -0.38939747 - -0.14293732 - 0.90112275 - -0.72516924 - 0.984644 - 0.019674359 - 0.055611532 - -0.2934082 - 0.46005297 - -0.19833238 - -0.45715576 - -0.3313622 - -0.027743893 - 0.38861424 - 0.15984824 - 0.44063833 - 0.30860153 - 0.27471563 - -0.13229555 - -0.51096684 - -0.8010029 - -0.4840509 - -0.519855 - 0.21440867 - 0.06478418 - 0.6136018 - 0.05823111 - 0.02230786 - -0.31735128 - 0.45651013 - 0.05455484 - 0.03172924 - -0.15754183 - 0.57087106 - 0.302311 - 0.32235426 - -0.37271267 - -0.062497634 - 0.49394235 - -0.5000919 - 0.47894144 - -0.055776197 - -0.7185101 - 0.34229258 - -0.47906867 - 1.2254735 - -0.63414246 - 0.091453776 - 0.04621964 - 0.39976168 - 1.1370289 - 0.8316378 - -0.1304873 - -0.19904993 - 0.366474 - 0.40303475 - 0.19891736 - -0.2694296 - -0.007872573 - -0.21664867 - 0.07184189 - -0.30892542 - -0.51357216 - -0.5767418 - 0.6941703 - -0.054563563 - -0.5326759 - -0.47848055 - 0.40909168 - 0.11241865 - 0.43552828 - 0.08060895 - -0.3370398 - 0.17169443 - 0.21128477 - -0.65164536 - -1.0550821 - -0.3331693 - 0.5605589 - 0.8862934 - -0.022461575 - -0.13918051 - 0.85254467 - -0.26736432 - 0.40900162 - -0.54530114 - -0.109466515 - -0.5180425 - -1.1476005 - 0.017990198 - 0.22895983 - -0.48230448 - 0.32160607 - 0.71078336 - 0.48590115 - -0.64358234 - -0.75938046 - 0.105871856 - -0.4471452 - 0.071705274 - 0.46669033 - 0.29078048 - -0.109523356 - 0.56042516 - 0.11813028 - 0.46236435 - -0.7612805 - 0.08246316 - 0.561123 - -0.22669752 - 0.46435124 - -0.12556691 - -0.17807259 - -0.38081747 - 0.089175105 - 0.37251982 - 0.009899339
text-embeddings-inference/backends/candle/tests/snapshots/test_jina__jina_batch.snap/0
{ "file_path": "text-embeddings-inference/backends/candle/tests/snapshots/test_jina__jina_batch.snap", "repo_id": "text-embeddings-inference", "token_count": 13690 }
395
import inspect
import torch

from pathlib import Path
from typing import Type, List
from transformers import AutoModel
from opentelemetry import trace

from text_embeddings_server.models import Model
from text_embeddings_server.models.types import PaddedBatch, Embedding

tracer = trace.get_tracer(__name__)


class DefaultModel(Model):
    def __init__(self, model_path: Path, device: torch.device, dtype: torch.dtype):
        model = AutoModel.from_pretrained(model_path).to(dtype).to(device)
        self.hidden_size = model.config.hidden_size
        # Inspect the forward signature once so that `embed` only passes the
        # keyword arguments this particular architecture accepts.
        self.has_position_ids = (
            inspect.signature(model.forward).parameters.get("position_ids", None)
            is not None
        )
        self.has_token_type_ids = (
            inspect.signature(model.forward).parameters.get("token_type_ids", None)
            is not None
        )

        super(DefaultModel, self).__init__(model=model, dtype=dtype, device=device)

    @property
    def batch_type(self) -> Type[PaddedBatch]:
        return PaddedBatch

    @tracer.start_as_current_span("embed")
    def embed(self, batch: PaddedBatch) -> List[Embedding]:
        kwargs = {"input_ids": batch.input_ids, "attention_mask": batch.attention_mask}
        if self.has_token_type_ids:
            kwargs["token_type_ids"] = batch.token_type_ids
        if self.has_position_ids:
            kwargs["position_ids"] = batch.position_ids

        output = self.model(**kwargs)
        # CLS pooling: keep the first token of the last hidden state for each sequence.
        embedding = output[0][:, 0]
        cpu_results = embedding.view(-1).tolist()

        return [
            Embedding(
                values=cpu_results[i * self.hidden_size : (i + 1) * self.hidden_size]
            )
            for i in range(len(batch))
        ]
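For intuition, here is a small self-contained sketch (not part of the file above) of the flattening and slicing step that the final list comprehension performs; the tensor sizes are made up for illustration:

```python
import torch

# Hypothetical output of the CLS pooling above: 2 sequences, hidden_size 4.
cls_embeddings = torch.arange(8, dtype=torch.float32).reshape(2, 4)

hidden_size = cls_embeddings.shape[1]
flat = cls_embeddings.view(-1).tolist()  # same flattening as `embedding.view(-1).tolist()`

# Slice the flat list back into one embedding per request, as the list comprehension does.
per_request = [
    flat[i * hidden_size : (i + 1) * hidden_size]
    for i in range(cls_embeddings.shape[0])
]
assert per_request == [[0.0, 1.0, 2.0, 3.0], [4.0, 5.0, 6.0, 7.0]]
```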
text-embeddings-inference/backends/python/server/text_embeddings_server/models/default_model.py/0
{ "file_path": "text-embeddings-inference/backends/python/server/text_embeddings_server/models/default_model.py", "repo_id": "text-embeddings-inference", "token_count": 738 }
396
use hf_hub::api::tokio::{ApiError, ApiRepo};
use std::path::PathBuf;
use tracing::instrument;

// Old classes used other config names than 'sentence_bert_config.json'
pub const ST_CONFIG_NAMES: [&str; 7] = [
    "sentence_bert_config.json",
    "sentence_roberta_config.json",
    "sentence_distilbert_config.json",
    "sentence_camembert_config.json",
    "sentence_albert_config.json",
    "sentence_xlm-roberta_config.json",
    "sentence_xlnet_config.json",
];

#[instrument(skip_all)]
pub async fn download_artifacts(api: &ApiRepo) -> Result<PathBuf, ApiError> {
    let start = std::time::Instant::now();

    tracing::info!("Starting download");

    api.get("config.json").await?;
    api.get("tokenizer.json").await?;

    let model_root = match api.get("model.safetensors").await {
        Ok(p) => p,
        Err(_) => {
            let p = api.get("pytorch_model.bin").await?;
            tracing::warn!("`model.safetensors` not found. Using `pytorch_model.bin` instead. Model loading will be significantly slower.");
            p
        }
    }
    .parent()
    .unwrap()
    .to_path_buf();

    tracing::info!("Model artifacts downloaded in {:?}", start.elapsed());
    Ok(model_root)
}

#[instrument(skip_all)]
pub async fn download_pool_config(api: &ApiRepo) -> Result<PathBuf, ApiError> {
    let pool_config_path = api.get("1_Pooling/config.json").await?;
    Ok(pool_config_path)
}

#[instrument(skip_all)]
pub async fn download_st_config(api: &ApiRepo) -> Result<PathBuf, ApiError> {
    // Try default path
    let err = match api.get(ST_CONFIG_NAMES[0]).await {
        Ok(st_config_path) => return Ok(st_config_path),
        Err(err) => err,
    };

    for name in &ST_CONFIG_NAMES[1..] {
        if let Ok(st_config_path) = api.get(name).await {
            return Ok(st_config_path);
        }
    }

    Err(err)
}
text-embeddings-inference/core/src/download.rs/0
{ "file_path": "text-embeddings-inference/core/src/download.rs", "repo_id": "text-embeddings-inference", "token_count": 818 }
397
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Serving private and gated models

If the model you wish to serve is behind gated access or resides in a private model repository on Hugging Face Hub,
you will need to have access to the model to serve it.

Once you have confirmed that you have access to the model:

- Navigate to your account's [Profile | Settings | Access Tokens page](https://huggingface.co/settings/tokens).
- Generate and copy a read token.

If you're using the CLI, set the `HUGGING_FACE_HUB_TOKEN` environment variable. For example:

```shell
export HUGGING_FACE_HUB_TOKEN=<YOUR READ TOKEN>
```

Alternatively, you can provide the token when deploying the model with Docker:

```shell
model=<your private model>
volume=$PWD/data
token=<your cli Hugging Face Hub token>

docker run --gpus all -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data --pull always ghcr.io/huggingface/text-embeddings-inference:1.2 --model-id $model
```
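Before launching the container, you can optionally sanity-check that the token can actually see the gated or private repository. Below is a sketch using the `huggingface_hub` Python library; the model id is a placeholder and the token is read from the environment variable set above:

```python
import os
from huggingface_hub import HfApi

# Placeholder repository id; replace with your private or gated model.
model_id = "your-org/your-private-model"

api = HfApi(token=os.environ["HUGGING_FACE_HUB_TOKEN"])
info = api.model_info(model_id)  # raises an error if the token cannot access the repo
print(info.sha)
```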
text-embeddings-inference/docs/source/en/private_models.md/0
{ "file_path": "text-embeddings-inference/docs/source/en/private_models.md", "repo_id": "text-embeddings-inference", "token_count": 471 }
398
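For readers scripting such deployments, a quick pre-flight check that the token actually grants access to a gated or private repository can save a failed container start. The snippet below is a hedged sketch using `huggingface_hub`; it is not part of the document above, and the example model id is a placeholder.

```python
import os

from huggingface_hub import model_info
from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError

token = os.environ.get("HUGGING_FACE_HUB_TOKEN")


def can_access(model_id: str) -> bool:
    """Return True if the token can read the repository's metadata."""
    try:
        model_info(model_id, token=token)
        return True
    except (GatedRepoError, RepositoryNotFoundError):
        # Either access was not granted yet, or the repo is private/unknown.
        return False


# Example (placeholder id): check before running the Docker command above.
# assert can_access("meta-llama/Llama-2-7b-hf"), "Request access on the Hub first"
```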
use opentelemetry::sdk::propagation::TraceContextPropagator;
use opentelemetry::sdk::trace::Sampler;
use opentelemetry::sdk::{trace, Resource};
use opentelemetry::{global, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
use tracing_subscriber::{EnvFilter, Layer};

/// Init logging using env variables LOG_LEVEL and LOG_FORMAT:
///     - otlp_endpoint is an optional URL to an Open Telemetry collector
///     - LOG_LEVEL may be TRACE, DEBUG, INFO, WARN or ERROR (default to INFO)
pub fn init_logging(otlp_endpoint: Option<&String>, json_output: bool) -> bool {
    let mut layers = Vec::new();

    // STDOUT/STDERR layer
    let fmt_layer = tracing_subscriber::fmt::layer()
        .with_file(true)
        .with_line_number(true);

    let fmt_layer = match json_output {
        true => fmt_layer.json().flatten_event(true).boxed(),
        false => fmt_layer.boxed(),
    };
    layers.push(fmt_layer);

    // OpenTelemetry tracing layer
    let mut global_tracer = false;
    if let Some(otlp_endpoint) = otlp_endpoint {
        global::set_text_map_propagator(TraceContextPropagator::new());

        let tracer = opentelemetry_otlp::new_pipeline()
            .tracing()
            .with_exporter(
                opentelemetry_otlp::new_exporter()
                    .tonic()
                    .with_endpoint(otlp_endpoint),
            )
            .with_trace_config(
                trace::config()
                    .with_resource(Resource::new(vec![KeyValue::new(
                        "service.name",
                        "text-embeddings-inference.router",
                    )]))
                    .with_sampler(Sampler::AlwaysOn),
            )
            .install_batch(opentelemetry::runtime::Tokio);

        if let Ok(tracer) = tracer {
            layers.push(tracing_opentelemetry::layer().with_tracer(tracer).boxed());
            init_tracing_opentelemetry::init_propagator().unwrap();
            global_tracer = true;
        };
    }

    // Filter events with LOG_LEVEL
    let env_filter =
        EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info"));

    tracing_subscriber::registry()
        .with(env_filter)
        .with(layers)
        .init();

    global_tracer
}
text-embeddings-inference/router/src/logging.rs/0
{ "file_path": "text-embeddings-inference/router/src/logging.rs", "repo_id": "text-embeddings-inference", "token_count": 1085 }
399
#!/bin/bash

if [[ -z "${HF_MODEL_ID}" ]]; then
  echo "HF_MODEL_ID must be set"
  exit 1
fi

export MODEL_ID="${HF_MODEL_ID}"

if [[ -n "${HF_MODEL_REVISION}" ]]; then
  export REVISION="${HF_MODEL_REVISION}"
fi

text-embeddings-router --port 8080 --json-output
text-embeddings-inference/sagemaker-entrypoint.sh/0
{ "file_path": "text-embeddings-inference/sagemaker-entrypoint.sh", "repo_id": "text-embeddings-inference", "token_count": 116 }
400
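The entrypoint above translates SageMaker-style `HF_MODEL_*` environment variables into the router's `MODEL_ID`/`REVISION` settings. As a rough illustration only, this is how those variables might be supplied when creating an endpoint with the `sagemaker` Python SDK; the role, image URI, instance type, and model id are placeholders, not values from the repository.

```python
from sagemaker.huggingface import HuggingFaceModel

# Environment variables consumed by the entrypoint script above.
env = {
    "HF_MODEL_ID": "BAAI/bge-base-en-v1.5",  # placeholder model id
    "HF_MODEL_REVISION": "main",             # optional
}

model = HuggingFaceModel(
    env=env,
    role="arn:aws:iam::123456789012:role/sagemaker-execution-role",  # placeholder
    image_uri="<text-embeddings-inference container image>",         # placeholder
)

# model.deploy(initial_instance_count=1, instance_type="ml.g5.xlarge")
```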
FROM lukemathwalker/cargo-chef:latest-rust-1.78 AS chef WORKDIR /usr/src ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse FROM chef as planner COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY launcher launcher RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder ARG GIT_SHA ARG DOCKER_LABEL RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \ curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \ unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \ unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \ rm -f $PROTOC_ZIP COPY --from=planner /usr/src/recipe.json recipe.json RUN cargo chef cook --release --recipe-path recipe.json COPY Cargo.toml Cargo.toml COPY rust-toolchain.toml rust-toolchain.toml COPY proto proto COPY benchmark benchmark COPY router router COPY launcher launcher RUN cargo build --release # Text Generation Inference base image for Intel FROM intel/intel-extension-for-pytorch:2.1.30-xpu as base USER root # libssl.so.1.1 is not installed on Ubuntu 22.04 by default, install it RUN wget http://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb && \ dpkg -i ./libssl1.1_1.1.1f-1ubuntu2_amd64.deb RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB \ | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list RUN apt-get update && apt install -y intel-basekit xpu-smi # Text Generation Inference base env ENV HUGGINGFACE_HUB_CACHE=/data \ HF_HUB_ENABLE_HF_TRANSFER=1 \ PORT=80 WORKDIR /usr/src RUN wget https://intel-extension-for-pytorch.s3.amazonaws.com/ipex_dev/xpu/intel_extension_for_pytorch-2.1.30a0-cp310-cp310-linux_x86_64.whl RUN pip install intel_extension_for_pytorch-2.1.30a0-cp310-cp310-linux_x86_64.whl # Install server COPY proto proto COPY server server COPY server/Makefile server/Makefile RUN cd server && \ make gen-server && \ pip install -r requirements_cuda.txt && \ pip install ".[accelerate, peft, outlines]" --no-cache-dir ENV CCL_ROOT=/opt/intel/oneapi/ccl/latest ENV I_MPI_ROOT=/opt/intel/oneapi/mpi/latest ENV FI_PROVIDER_PATH=/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/lib/prov:/usr/lib/x86_64-linux-gnu/libfabric ENV LIBRARY_PATH=/opt/intel/oneapi/mpi/latest/lib:/opt/intel/oneapi/ccl/latest/lib/:/opt/intel/oneapi/mkl/latest/lib/:/opt/intel/oneapi/compiler/latest/lib ENV LD_LIBRARY_PATH=/opt/intel/oneapi/ccl/latest/lib/:/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/lib:/opt/intel/oneapi/mpi/latest/lib:/opt/intel/oneapi/mkl/latest/lib:/opt/intel/oneapi/compiler/latest/opt/compiler/lib:/opt/intel/oneapi/compiler/latest/lib:/opt/intel/oneapi/lib:/opt/intel/oneapi/lib/intel64: ENV PATH=/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/bin:/opt/intel/oneapi/mpi/latest/bin:/opt/intel/oneapi/mpi/latest/opt/mpi/libfabric/bin:/opt/intel/oneapi/mkl/latest/bin/:/opt/intel/oneapi/compiler/latest/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ENV CCL_ZE_IPC_EXCHANGE=sockets # Install benchmarker COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark # Install router COPY --from=builder /usr/src/target/release/text-generation-router /usr/local/bin/text-generation-router # Install launcher COPY --from=builder 
/usr/src/target/release/text-generation-launcher /usr/local/bin/text-generation-launcher # Final image FROM base ENTRYPOINT ["text-generation-launcher"] CMD ["--json-output"]
text-generation-inference/Dockerfile_intel/0
{ "file_path": "text-generation-inference/Dockerfile_intel", "repo_id": "text-generation-inference", "token_count": 1534 }
401
unit-tests:
	python -m pytest --cov=text_generation tests

install:
	pip install pip --upgrade
	pip install -e .
text-generation-inference/clients/python/Makefile/0
{ "file_path": "text-generation-inference/clients/python/Makefile", "repo_id": "text-generation-inference", "token_count": 41 }
402
- sections:
  - local: index
    title: Text Generation Inference
  - local: quicktour
    title: Quick Tour
  - local: installation
    title: Installation
  - local: supported_models
    title: Supported Models and Hardware
  - local: messages_api
    title: Messages API
  title: Getting started
- sections:
  - local: basic_tutorials/consuming_tgi
    title: Consuming TGI
  - local: basic_tutorials/preparing_model
    title: Preparing Model for Serving
  - local: basic_tutorials/gated_model_access
    title: Serving Private & Gated Models
  - local: basic_tutorials/using_cli
    title: Using TGI CLI
  - local: basic_tutorials/launcher
    title: All TGI CLI options
  - local: basic_tutorials/non_core_models
    title: Non-core Model Serving
  - local: basic_tutorials/safety
    title: Safety
  - local: basic_tutorials/using_guidance
    title: Using Guidance, JSON, tools
  - local: basic_tutorials/visual_language_models
    title: Visual Language Models
  title: Tutorials
- sections:
  - local: conceptual/streaming
    title: Streaming
  - local: conceptual/quantization
    title: Quantization
  - local: conceptual/tensor_parallelism
    title: Tensor Parallelism
  - local: conceptual/paged_attention
    title: PagedAttention
  - local: conceptual/safetensors
    title: Safetensors
  - local: conceptual/flash_attention
    title: Flash Attention
  - local: conceptual/speculation
    title: Speculation (Medusa, ngram)
  - local: conceptual/guidance
    title: How Guidance Works (via outlines)
  title: Conceptual Guides
text-generation-inference/docs/source/_toctree.yml/0
{ "file_path": "text-generation-inference/docs/source/_toctree.yml", "repo_id": "text-generation-inference", "token_count": 505 }
403
# Streaming ## What is Streaming? Token streaming is the mode in which the server returns the tokens one by one as the model generates them. This enables showing progressive generations to the user rather than waiting for the whole generation. Streaming is an essential aspect of the end-user experience as it reduces latency, one of the most critical aspects of a smooth experience. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual_360.gif" /> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/tgi/streaming-generation-visual-dark_360.gif" /> </div> With token streaming, the server can start returning the tokens one by one before having to generate the whole response. Users can have a sense of the generation's quality before the end of the generation. This has different positive effects: * Users can get results orders of magnitude earlier for extremely long queries. * Seeing something in progress allows users to stop the generation if it's not going in the direction they expect. * Perceived latency is lower when results are shown in the early stages. * When used in conversational UIs, the experience feels more natural. For example, a system can generate 100 tokens per second. If the system generates 1000 tokens, with the non-streaming setup, users need to wait 10 seconds to get results. On the other hand, with the streaming setup, users get initial results immediately, and although end-to-end latency will be the same, they can see half of the generation after five seconds. Below you can see an interactive demo that shows non-streaming vs streaming side-by-side. Click **generate** below. <div class="block dark:hidden"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=light" width="850" height="350" ></iframe> </div> <div class="hidden dark:block"> <iframe src="https://osanseviero-streaming-vs-non-streaming.hf.space?__theme=dark" width="850" height="350" ></iframe> </div> ## How to use Streaming? ### Streaming with Python To stream tokens with `InferenceClient`, simply pass `stream=True` and iterate over the response. ```python from huggingface_hub import InferenceClient client = InferenceClient("http://127.0.0.1:8080") for token in client.text_generation("How do you make cheese?", max_new_tokens=12, stream=True): print(token) # To # make # cheese #, # you # need # to # start # with # milk #. ``` If you want additional details, you can add `details=True`. In this case, you get a `TextGenerationStreamResponse` which contains additional information such as the probabilities and the tokens. For the final response in the stream, it also returns the full generated text. ```python for details in client.text_generation("How do you make cheese?", max_new_tokens=12, details=True, stream=True): print(details) #TextGenerationStreamResponse(token=Token(id=193, text='\n', logprob=-0.007358551, special=False), generated_text=None, details=None) #TextGenerationStreamResponse(token=Token(id=2044, text='To', logprob=-1.1357422, special=False), generated_text=None, details=None) #TextGenerationStreamResponse(token=Token(id=717, text=' make', logprob=-0.009841919, special=False), generated_text=None, details=None) #... 
#TextGenerationStreamResponse(token=Token(id=25, text='.', logprob=-1.3408203, special=False), generated_text='\nTo make cheese, you need to start with milk.', details=StreamDetails(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=12, seed=None)) ``` The `huggingface_hub` library also comes with an `AsyncInferenceClient` in case you need to handle the requests concurrently. ```python from huggingface_hub import AsyncInferenceClient client = AsyncInferenceClient("http://127.0.0.1:8080") async for token in await client.text_generation("How do you make cheese?", stream=True): print(token) # To # make # cheese #, # you # need # to # start # with # milk #. ``` ### Streaming with cURL To use the `generate_stream` endpoint with curl, you can add the `-N` flag, which disables curl default buffering and shows data as it arrives from the server ```curl curl -N 127.0.0.1:8080/generate_stream \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` ### Streaming with JavaScript First, we need to install the `@huggingface/inference` library. `npm install @huggingface/inference` If you're using the free Inference API, you can use `HfInference`. If you're using inference endpoints, you can use `HfInferenceEndpoint`. We can create a `HfInferenceEndpoint` providing our endpoint URL and credential. ```js import { HfInferenceEndpoint } from '@huggingface/inference' const hf = new HfInferenceEndpoint('https://YOUR_ENDPOINT.endpoints.huggingface.cloud', 'hf_YOUR_TOKEN') // prompt const prompt = 'What can you do in Nuremberg, Germany? Give me 3 Tips' const stream = hf.textGenerationStream({ inputs: prompt }) for await (const r of stream) { // yield the generated token process.stdout.write(r.token.text) } ``` ## How does Streaming work under the hood? Under the hood, TGI uses Server-Sent Events (SSE). In an SSE Setup, a client sends a request with the data, opening an HTTP connection and subscribing to updates. Afterward, the server sends data to the client. There is no need for further requests; the server will keep sending the data. SSEs are unidirectional, meaning the client does not send other requests to the server. SSE sends data over HTTP, making it easy to use. SSEs are different than: * Polling: where the client keeps calling the server to get data. This means that the server might return empty responses and cause overhead. * Webhooks: where there is a bi-directional connection. The server can send information to the client, but the client can also send data to the server after the first request. Webhooks are more complex to operate as they don’t only use HTTP. If there are too many requests at the same time, TGI returns an HTTP Error with an `overloaded` error type (`huggingface_hub` returns `OverloadedError`). This allows the client to manage the overloaded server (e.g., it could display a busy error to the user or retry with a new request). To configure the maximum number of concurrent requests, you can specify `--max_concurrent_requests`, allowing clients to handle backpressure.
text-generation-inference/docs/source/conceptual/streaming.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/streaming.md", "repo_id": "text-generation-inference", "token_count": 1969 }
404
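The streaming document above shows `InferenceClient`, curl, and the JavaScript client; under the hood all of them consume Server-Sent Events. For readers who want to see the raw SSE framing, here is a hedged sketch that reads `/generate_stream` with plain `requests`; the endpoint URL is a placeholder and error handling is omitted.

```python
import json

import requests

url = "http://127.0.0.1:8080/generate_stream"  # placeholder endpoint
payload = {"inputs": "What is Deep Learning?", "parameters": {"max_new_tokens": 20}}

with requests.post(url, json=payload, stream=True) as resp:
    for raw_line in resp.iter_lines():
        if not raw_line:
            continue  # SSE events are separated by blank lines
        line = raw_line.decode("utf-8")
        if line.startswith("data:"):
            event = json.loads(line[len("data:"):])
            # Each event carries one token; `generated_text` is only set on the last event.
            print(event["token"]["text"], end="", flush=True)
```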
[ { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "\n" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": "\n" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": "\n" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "hd" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "\n" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": "\n" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": "\n" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "aho" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "2" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": "2" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": "2" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "ima" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "." } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": "." } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": "." 
} ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "\n" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " Sarah" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " Yes" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " And" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "i" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "'" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": "," } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " what" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "'" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": "s" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " Moh" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " is" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": "m" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " Room" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": "s" } ], "created": 
1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " the" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": " tired" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": ":" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": "'" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " capital" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": " of" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 0, "logprobs": null, "text": " She" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 1, "logprobs": null, "text": " scale" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 2, "logprobs": null, "text": " of" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" }, { "choices": [ { "finish_reason": "", "index": 3, "logprobs": null, "text": " being" } ], "created": 1713284431, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "2.0.1-native" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts_stream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts_stream.json", "repo_id": "text-generation-inference", "token_count": 7116 }
405
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 806, "logprob": -11.890625, "text": "Wh" }, { "id": 1446, "logprob": -3.6699219, "text": "ats" }, { "id": 2921, "logprob": -7.8203125, "text": "Go" }, { "id": 468, "logprob": -8.0703125, "text": "og" }, { "id": 793, "logprob": -2.1875, "text": "les" }, { "id": 16332, "logprob": -9.7109375, "text": "DNS" } ], "seed": null, "tokens": [ { "id": 29946, "logprob": -1.4765625, "special": false, "text": "4" }, { "id": 29906, "logprob": -0.9199219, "special": false, "text": "2" }, { "id": 29889, "logprob": 0.0, "special": false, "text": "." }, { "id": 29896, "logprob": -1.1367188, "special": false, "text": "1" }, { "id": 29889, "logprob": -1.4648438, "special": false, "text": "." }, { "id": 29896, "logprob": -0.40722656, "special": false, "text": "1" }, { "id": 29889, "logprob": -0.17419434, "special": false, "text": "." }, { "id": 29896, "logprob": -0.20251465, "special": false, "text": "1" }, { "id": 29900, "logprob": -1.5527344, "special": false, "text": "0" }, { "id": 29896, "logprob": -1.3710938, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": "42.1.1.101" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_grammar_llama/test_flash_llama_grammar_regex.json", "repo_id": "text-generation-inference", "token_count": 1292 }
406
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 50278, "logprob": null, "text": "<|prompter|>" }, { "id": 1276, "logprob": -8.03125, "text": "What" }, { "id": 310, "logprob": -5.421875, "text": " is" }, { "id": 247, "logprob": -2.1601562, "text": " a" }, { "id": 1167, "logprob": -5.4609375, "text": " mem" }, { "id": 70, "logprob": -0.005657196, "text": "e" }, { "id": 13, "logprob": -7.28125, "text": "," }, { "id": 285, "logprob": -0.2980957, "text": " and" }, { "id": 752, "logprob": -2.1679688, "text": " what" }, { "id": 434, "logprob": -5.6210938, "text": "'s" }, { "id": 253, "logprob": -0.81103516, "text": " the" }, { "id": 2892, "logprob": -6.6640625, "text": " history" }, { "id": 3212, "logprob": -2.265625, "text": " behind" }, { "id": 436, "logprob": -11.5078125, "text": " this" }, { "id": 3159, "logprob": -2.1582031, "text": " word" }, { "id": 32, "logprob": -0.008720398, "text": "?" }, { "id": 0, "logprob": -2.4726562, "text": "<|endoftext|>" }, { "id": 50281, "logprob": -18.265625, "text": "<|assistant|>" } ], "seed": null, "tokens": [ { "id": 510, "logprob": -0.63183594, "special": false, "text": "The" }, { "id": 3159, "logprob": -0.5390625, "special": false, "text": " word" }, { "id": 346, "logprob": -0.045684814, "special": false, "text": " \"" }, { "id": 6441, "logprob": -0.002090454, "special": false, "text": "mem" }, { "id": 70, "logprob": -1.3589859e-05, "special": false, "text": "e" }, { "id": 3, "logprob": -0.0009455681, "special": false, "text": "\"" }, { "id": 369, "logprob": -0.088012695, "special": false, "text": " was" }, { "id": 806, "logprob": -0.12585449, "special": false, "text": " first" }, { "id": 908, "logprob": -0.017196655, "special": false, "text": " used" }, { "id": 275, "logprob": -0.49731445, "special": false, "text": " in" } ] }, "generated_text": "The word \"meme\" was first used in" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox_sharded/test_flash_neox.json", "repo_id": "text-generation-inference", "token_count": 1970 }
407
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 20, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -8.5859375, "text": " ge" }, { "id": 21017, "logprob": -7.5859375, "text": "ometric" }, { "id": 81, "logprob": -0.2668457, "text": "_" }, { "id": 6009, "logprob": -1.6416016, "text": "mean" }, { "id": 26, "logprob": -0.22705078, "text": "(" }, { "id": 62, "logprob": -5.2304688, "text": "L" }, { "id": 44, "logprob": -3.0976562, "text": ":" }, { "id": 1682, "logprob": -1.1044922, "text": " List" }, { "id": 77, "logprob": -0.14294434, "text": "[" }, { "id": 1808, "logprob": -0.32299805, "text": "float" }, { "id": 10794, "logprob": -2.8164062, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -0.1282959, "special": false, "text": "\n " }, { "id": 1524, "logprob": -0.97998047, "special": false, "text": " \"\"\"" }, { "id": 284, "logprob": -0.7006836, "special": false, "text": "\n " }, { "id": 14883, "logprob": -2.1933594, "special": false, "text": " Calculate" }, { "id": 322, "logprob": -0.2697754, "special": false, "text": " the" }, { "id": 3226, "logprob": -0.0836792, "special": false, "text": " ge" }, { "id": 21017, "logprob": -0.018737793, "special": false, "text": "ometric" }, { "id": 5651, "logprob": -0.028640747, "special": false, "text": " mean" }, { "id": 432, "logprob": -0.29467773, "special": false, "text": " of" }, { "id": 312, "logprob": -0.31518555, "special": false, "text": " a" }, { "id": 1149, "logprob": -0.20605469, "special": false, "text": " list" }, { "id": 432, "logprob": -0.23254395, "special": false, "text": " of" }, { "id": 7515, "logprob": -0.4489746, "special": false, "text": " numbers" }, { "id": 32, "logprob": -0.6044922, "special": false, "text": "." }, { "id": 446, "logprob": -0.63964844, "special": false, "text": "\n\n " }, { "id": 499, "logprob": -1.1953125, "special": false, "text": " :" }, { "id": 753, "logprob": -0.03515625, "special": false, "text": "param" }, { "id": 498, "logprob": -0.06311035, "special": false, "text": " L" }, { "id": 44, "logprob": -0.003414154, "special": false, "text": ":" }, { "id": 1682, "logprob": -1.3310547, "special": false, "text": " List" } ], "top_tokens": null }, "generated_text": "\n \"\"\"\n Calculate the geometric mean of a list of numbers.\n\n :param L: List" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json", "repo_id": "text-generation-inference", "token_count": 2389 }
408
import pytest


@pytest.fixture(scope="module")
def bloom_560m_sharded_handle(launcher):
    with launcher("bigscience/bloom-560m", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def bloom_560m_sharded(bloom_560m_sharded_handle):
    await bloom_560m_sharded_handle.health(240)
    return bloom_560m_sharded_handle.client


@pytest.mark.asyncio
async def test_bloom_560m_sharded(bloom_560m_sharded, response_snapshot):
    response = await bloom_560m_sharded.generate(
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        top_p=0.9,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_bloom_560m_sharded_load(
    bloom_560m_sharded, generate_load, response_snapshot
):
    responses = await generate_load(
        bloom_560m_sharded,
        "Pour déguster un ortolan, il faut tout d'abord",
        max_new_tokens=10,
        n=4,
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_bloom_560m_sharded.py", "repo_id": "text-generation-inference", "token_count": 511 }
409
import pytest


@pytest.fixture(scope="module")
def flash_santacoder_handle(launcher):
    with launcher("bigcode/santacoder") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_santacoder(flash_santacoder_handle):
    await flash_santacoder_handle.health(300)
    return flash_santacoder_handle.client


@pytest.mark.asyncio
async def test_flash_santacoder(flash_santacoder, response_snapshot):
    response = await flash_santacoder.generate(
        "def print_hello", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_santacoder_load(
    flash_santacoder, generate_load, response_snapshot
):
    responses = await generate_load(
        flash_santacoder, "def print_hello", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_santacoder.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_santacoder.py", "repo_id": "text-generation-inference", "token_count": 387 }
410
[tool.poetry]
name = "text-generation-integration-tests"
version = "2.0.1"
description = "Text Generation Inference integration tests"
authors = ["Nicolas Patry <nicolas@huggingface.co>"]

[tool.poetry.dependencies]
pydantic = "> 2, < 3"
python = ">=3.9,<3.13"
syrupy = "4.0.1"
text-generation = "^0.6.0"
pytest = "^7.4.0"
pytest-asyncio = "^0.21.1"
docker = "^6.1.3"
text-generation-inference/integration-tests/pyproject.toml/0
{ "file_path": "text-generation-inference/integration-tests/pyproject.toml", "repo_id": "text-generation-inference", "token_count": 163 }
411
use std::fs;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("cargo:rerun-if-changed=../../proto/generate.proto");
    fs::create_dir("src/pb").unwrap_or(());

    let mut config = prost_build::Config::new();
    config.protoc_arg("--experimental_allow_proto3_optional");

    tonic_build::configure()
        .build_client(true)
        .build_server(false)
        .out_dir("src/pb")
        .include_file("mod.rs")
        .compile_with_config(config, &["../../proto/generate.proto"], &["../../proto"])
        .unwrap_or_else(|e| panic!("protobuf compilation failed: {e}"));

    Ok(())
}
text-generation-inference/router/client/build.rs/0
{ "file_path": "text-generation-inference/router/client/build.rs", "repo_id": "text-generation-inference", "token_count": 270 }
412
#!/bin/bash

if [[ -z "${HF_MODEL_ID}" ]]; then
  echo "HF_MODEL_ID must be set"
  exit 1
fi

export MODEL_ID="${HF_MODEL_ID}"

if [[ -n "${HF_MODEL_REVISION}" ]]; then
  export REVISION="${HF_MODEL_REVISION}"
fi

if [[ -n "${SM_NUM_GPUS}" ]]; then
  export NUM_SHARD="${SM_NUM_GPUS}"
fi

if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then
  export QUANTIZE="${HF_MODEL_QUANTIZE}"
fi

if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then
  export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}"
fi

text-generation-launcher --port 8080
text-generation-inference/sagemaker-entrypoint.sh/0
{ "file_path": "text-generation-inference/sagemaker-entrypoint.sh", "repo_id": "text-generation-inference", "token_count": 239 }
413
// Adapted from turboderp exllama: https://github.com/turboderp/exllama

#include "column_remap.cuh"
#include "../util.cuh"

const int SHUF_BLOCKSIZE_X = 256;
const int SHUF_BLOCKSIZE_Y = 16;

__global__ void column_remap_kernel
(
    const half* __restrict__ x,
    half* __restrict__ x_new,
    const int x_width,
    const int x_height,
    const uint32_t* x_map
)
{
    int x_column = SHUF_BLOCKSIZE_X * blockIdx.x + threadIdx.x;
    int x_row = SHUF_BLOCKSIZE_Y * blockIdx.y;

    int x_stride = x_width;
    int x_idx = x_row * x_stride + x_column;

    int x_row_end = min(x_row + SHUF_BLOCKSIZE_Y, x_height);
    int x_idx_end = x_row_end * x_stride + x_column;

    int s_column = x_map[x_column];
    int s_idx = x_row * x_stride + s_column;

    while (x_idx < x_idx_end)
    {
        x_new[x_idx] = x[s_idx];
        x_idx += x_stride;
        s_idx += x_stride;
    }
}

// Remap columns in x to correspond to sequential group index before matmul
//
// perform x -> seq_x such that seq_x @ seq_w == x @ w

void column_remap_cuda
(
    const half* x,
    half* x_new,
    const int x_height,
    const int x_width,
    const uint32_t* x_map
)
{
    dim3 threads(SHUF_BLOCKSIZE_X, 1, 1);

    dim3 blocks
    (
        (x_width + SHUF_BLOCKSIZE_X - 1) / SHUF_BLOCKSIZE_X,
        (x_height + SHUF_BLOCKSIZE_Y - 1) / SHUF_BLOCKSIZE_Y,
        1
    );

    column_remap_kernel<<<blocks, threads>>>(x, x_new, x_width, x_height, x_map);
}
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cu", "repo_id": "text-generation-inference", "token_count": 696 }
414
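The comment in the kernel above states the invariant it preserves: after remapping, `seq_x @ seq_w == x @ w`. A small PyTorch sketch (not part of the kernel sources) makes that concrete; it checks that permuting the activation columns and the weight rows with the same `x_map` leaves the matmul result unchanged.

```python
import torch

torch.manual_seed(0)

m, k, n = 4, 16, 8
x = torch.randn(m, k)
w = torch.randn(k, n)

# x_map is a permutation of the k dimension, as in the kernel above.
x_map = torch.randperm(k)

seq_x = x[:, x_map]  # what column_remap_kernel produces
seq_w = w[x_map, :]  # weights stored in the permuted (sequential-group) order

# Reordering both sides with the same permutation does not change the sum over k.
assert torch.allclose(seq_x @ seq_w, x @ w, atol=1e-5)
print("column remap preserves the matmul result")
```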
#include "q_gemm.cuh" #include "util.cuh" #include "matrix_view.cuh" #include "../config.h" #include "quant/qdq_2.cuh" #include "quant/qdq_3.cuh" #include "quant/qdq_4.cuh" #include "quant/qdq_5.cuh" #include "quant/qdq_6.cuh" #include "quant/qdq_8.cuh" #define GPTQ_BLOCK_KN_SIZE 128 #define GPTQ_BLOCK_M_SIZE_MAX 8 #define GPTQ_MAX_GROUPS_IN_BLOCK (GPTQ_BLOCK_KN_SIZE / 32) #define EXL2_BLOCK_KN_SIZE 64 #define EXL2_BLOCK_M_SIZE_MAX 8 #define EXL2_MAX_GROUPS_IN_BLOCK (EXL2_BLOCK_KN_SIZE / 32) #define CLEAR_N_SIZE 256 #include "q_gemm_kernel.cuh" #include "q_gemm_kernel_gptq.cuh" void gemm_half_q_half_cuda_part ( const half* a, QMatrix* b, half* c, int size_m, int size_n, int size_k, int m_count, bool clear, const half* r_weights, int r_weights_stride, bool mul_r_weights ) { const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (!b->is_gptq) { dim3 blockDim, gridDim; blockDim.x = EXL2_BLOCK_KN_SIZE; blockDim.y = 1; blockDim.z = 1; gridDim.x = DIVIDE(size_n, EXL2_BLOCK_KN_SIZE * 4); gridDim.y = DIVIDE(size_m, m_count); gridDim.z = DIVIDE(size_k, EXL2_BLOCK_KN_SIZE); fp_gemm_half_q_half_kernel kernel = pick_gemm_half_q_half_kernel(m_count, r_weights != NULL, mul_r_weights); kernel<<<gridDim, blockDim, 0, stream>>> ( a, b->cuda_q_weight, b->cuda_q_scale, b->cuda_q_scale_max, c, size_m, size_n, size_k, b->groups, b->cuda_q_group_map, b->cuda_q_perm, b->rows_8, b->rows_6, b->rows_5, b->rows_4, b->rows_3, b->rows_2, clear, r_weights, r_weights_stride ); } else { dim3 blockDim, gridDim; blockDim.x = GPTQ_BLOCK_KN_SIZE; blockDim.y = 1; blockDim.z = 1; gridDim.x = DIVIDE(size_n, GPTQ_BLOCK_KN_SIZE * 4); gridDim.y = DIVIDE(size_m, m_count); gridDim.z = DIVIDE(size_k, GPTQ_BLOCK_KN_SIZE); fp_gemm_half_q_half_gptq_kernel kernel = pick_gemm_half_q_half_gptq_kernel(m_count, r_weights != NULL, mul_r_weights); // DBGX((uint64_t) r_weights); // if (r_weights) // print_global_mem(r_weights, 1, 1, 1); // DBGI(r_weights_stride); kernel<<<gridDim, blockDim, 0, stream>>> ( a, b->cuda_q_weight, b->cuda_gptq_qzeros, b->cuda_gptq_scales, c, size_m, size_n, size_k, b->groups, b->gptq_groupsize, b->cuda_q_perm, b->rows_4, clear, r_weights, r_weights_stride ); } } void gemm_half_q_half_cuda ( cublasHandle_t cublas_handle, const half* a, QMatrix* b, half* c, int size_m, int size_n, int size_k, bool clear, half* temp_dq, bool force_cuda, const half* r_weights, const int r_weights_stride, bool mul_r_weights ) { if (size_m > MAX_Q_GEMM_ROWS && !force_cuda) { // Reconstruct FP16 matrix, then cuBLAS if (!temp_dq) temp_dq = b->temp_dq; b->reconstruct(temp_dq); //cublasSetMathMode(cublas_handle, CUBLAS_TENSOR_OP_MATH); const half alpha = __float2half(1.0f); const half beta = clear ? __float2half(0.0f) : __float2half(1.0f); cublasHgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, size_n, size_m, size_k, &alpha, temp_dq, size_n, a, size_k, &beta, c, size_n); //const float alpha = 1.0f; //const float beta = clear ? 0.0f : 1.0f; //cublasSgemmEx(cublas_handle, // CUBLAS_OP_N, // CUBLAS_OP_N, // size_n, size_m, size_k, // &alpha, temp_dq, CUDA_R_16F, size_n, // a, CUDA_R_16F, size_k, // &beta, c, CUDA_R_16F, size_n); //const float alpha = 1.0f; //const float beta = clear ? 0.0f : 1.0f; //cublasGemmEx(cublas_handle, // CUBLAS_OP_N, CUBLAS_OP_N, // size_n, size_m, size_k, // &alpha, temp_dq, CUDA_R_16F, size_n, // a, CUDA_R_16F, size_k, // &beta, c, CUDA_R_16F, size_n, // CUDA_R_16F, CUBLAS_GEMM_DFALT_TENSOR_OP); } else { // Quantized matmul int block_m_size_max = b->is_gptq ? 
GPTQ_BLOCK_M_SIZE_MAX : EXL2_BLOCK_M_SIZE_MAX; int max_chunks = size_m / block_m_size_max; int last_chunk = max_chunks * block_m_size_max; int last_chunk_size = size_m - last_chunk; if (max_chunks) { gemm_half_q_half_cuda_part(a, b, c, last_chunk, size_n, size_k, block_m_size_max, clear, r_weights, r_weights_stride, mul_r_weights); } if (last_chunk_size) { gemm_half_q_half_cuda_part(a + last_chunk * size_k, b, c + last_chunk * size_n, last_chunk_size, size_n, size_k, last_chunk_size, clear, r_weights, r_weights_stride, mul_r_weights); } } } __global__ void clear_kernel ( half* __restrict__ c, const int size_m, const int size_n ) { int m = blockIdx.y; int n = (blockIdx.x * CLEAR_N_SIZE + threadIdx.x) * 8; if (n >= size_n) return; int4* c_ptr = (int4*)(c + m * size_n + n); *c_ptr = {}; } void clear_tensor_cuda ( half* c, int size_m, int size_n ) { // dim3 blockDim, gridDim; // blockDim.x = CLEAR_N_SIZE; // blockDim.y = 1; // gridDim.x = DIVIDE(size_n / 8, CLEAR_N_SIZE); // gridDim.y = size_m; // clear_kernel<<<gridDim, blockDim>>>(c, size_m, size_n); }
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cu", "repo_id": "text-generation-inference", "token_count": 3563 }
415
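Besides the cuBLAS fallback for large batches, `gemm_half_q_half_cuda` above splits the M dimension into full-size chunks of `block_m_size_max` rows plus one smaller leftover launch. The sketch below is a plain-Python paraphrase of that chunking arithmetic, included only to make the index math easier to follow; it is not part of the CUDA sources.

```python
def split_m_dimension(size_m: int, block_m_size_max: int = 8):
    """Mirror the M-dimension chunking in gemm_half_q_half_cuda."""
    max_chunks = size_m // block_m_size_max
    last_chunk = max_chunks * block_m_size_max  # rows covered by full-size launches
    last_chunk_size = size_m - last_chunk       # leftover rows get their own launch
    return last_chunk, last_chunk_size


for m in (1, 7, 8, 9, 20):
    print(m, split_m_dimension(m))
# e.g. 20 -> (16, 4): one launch covers rows 0..15, a second covers the last 4 rows
```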
import torch

from typing import Dict, Optional, TypeVar

from text_generation_server.models.types import Batch

B = TypeVar("B", bound=Batch)


class Cache:
    def __init__(self):
        self.cache: Dict[int, B] = {}

    def pop(self, batch_id: int) -> Optional[B]:
        return self.cache.pop(batch_id, None)

    def set(self, entry: B):
        if entry is not None:
            self.cache[entry.batch_id] = entry

    def delete(self, batch_id: int):
        batch = self.pop(batch_id)
        if batch is not None:
            del batch
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

    def clear(self):
        keys = list(self.cache.keys())
        for k in keys:
            self.delete(k)

    def __len__(self):
        return len(self.cache.keys())
text-generation-inference/server/text_generation_server/cache.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/cache.py", "repo_id": "text-generation-inference", "token_count": 359 }
416
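The `Cache` above is keyed by `batch_id` and is what lets the server hand batches back and forth between prefill and decode steps. A tiny usage sketch follows; it assumes the `text_generation_server` package is importable and uses a stand-in object in place of a real `Batch`, since constructing one requires a loaded model.

```python
from types import SimpleNamespace

from text_generation_server.cache import Cache

cache = Cache()

# Stand-in for a real Batch: only `batch_id` matters to the cache itself.
batch = SimpleNamespace(batch_id=0)

cache.set(batch)           # store after prefill
assert len(cache) == 1
same_batch = cache.pop(0)  # retrieve for the next decode step
assert same_batch is batch
cache.delete(0)            # no-op here: the entry was already popped
cache.clear()
```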
import torch from torch import nn from accelerate import init_empty_weights from text_generation_server.utils.import_utils import ( SYSTEM, ) # Monkey patching @classmethod def load_layer_norm(cls, prefix, weights, eps): weight = weights.get_tensor(f"{prefix}.weight") bias = weights.get_tensor(f"{prefix}.bias") with init_empty_weights(): ln = cls(weight.shape, eps=eps) ln.weight = torch.nn.Parameter(weight) ln.bias = torch.nn.Parameter(bias) return ln @classmethod def load_layer_norm_no_bias(cls, prefix, weights, eps): weight = weights.get_tensor(f"{prefix}.weight") with init_empty_weights(): ln = cls(weight.shape, eps=eps) ln.weight = torch.nn.Parameter(weight) ln.bias = None return ln torch.nn.LayerNorm.load = load_layer_norm torch.nn.LayerNorm.load_no_bias = load_layer_norm_no_bias if SYSTEM == "cuda": import dropout_layer_norm class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if hidden_states.shape[-1] > 8192: if residual is not None: hidden_states += residual residual = hidden_states return super(FastLayerNorm, self).forward(hidden_states), residual else: ( normed_hidden_states, residual, *rest, ) = dropout_layer_norm.dropout_add_ln_fwd( hidden_states, residual, self.weight, self.bias, None, None, None, None, 0.0, self.eps, 1.0, 0, None, False, False, ) if residual is None: residual = hidden_states return normed_hidden_states, residual elif SYSTEM == "rocm": from vllm import layernorm_ops class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): if residual is not None: hidden_states += residual residual = hidden_states return super().forward(hidden_states), residual elif SYSTEM == "xpu": import intel_extension_for_pytorch as ipex class FastLayerNorm(nn.LayerNorm): def forward(self, hidden_states, residual=None): res_out = hidden_states out = ipex.llm.functional.add_layer_norm( residual, hidden_states, self.weight, self.bias, self.eps, True ) if residual is not None: res_out = residual return out, res_out class FastRMSNorm(nn.Module): def __init__(self, weight: torch.Tensor, eps: float): super().__init__() self.weight = nn.Parameter(weight) self.variance_epsilon = eps @classmethod def load(cls, prefix, weights, eps=1e-6): weight = weights.get_tensor(f"{prefix}.weight") return cls(weight, eps) def forward(self, hidden_states, residual=None): if SYSTEM == "xpu": residual_out = hidden_states out = ipex.llm.functional.add_rms_norm( residual, hidden_states, self.weight, None, self.variance_epsilon, True, ) if residual is not None: residual_out = residual return out, residual_out elif hidden_states.shape[-1] > 8192: if residual is not None: hidden_states += residual residual = hidden_states hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt( variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states, residual elif SYSTEM == "cuda": # faster post attention rms norm ( normed_hidden_states, res, *rest, ) = dropout_layer_norm.dropout_add_ln_fwd( hidden_states, residual, self.weight, None, None, None, None, None, 0.0, self.variance_epsilon, 1.0, 0, None, False, True, # Activate RMSNorm ) if res is None: res = hidden_states return normed_hidden_states, res elif SYSTEM == "rocm": # We use VLLM RMSNorm kernel that can be compiled for RoCm, instead of Flash Attention ones that can not. 
if residual is not None: hidden_states += residual residual = hidden_states out = torch.empty_like(hidden_states) layernorm_ops.rms_norm( out, hidden_states, self.weight.data, self.variance_epsilon, ) return out, residual else: raise ValueError( "Your system seem to be not supported. Please check your install or open an issue at https://github.com/huggingface/text-generation-inference/issues with a clear reproduction." )
text-generation-inference/server/text_generation_server/layers/layernorm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/layernorm.py", "repo_id": "text-generation-inference", "token_count": 3061 }
417
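All of the backends above (the CUDA fused kernel, ROCm's VLLM kernel, the XPU path, and the pure-PyTorch branch used for wide hidden states) compute the same RMSNorm. For readers comparing a kernel against a baseline, here is a minimal plain-PyTorch reference that matches the fallback branch of `FastRMSNorm.forward`; it is a sketch, not code from the module above.

```python
import torch


def rms_norm_reference(hidden_states, weight, residual=None, eps=1e-6):
    """Plain-PyTorch RMSNorm matching the fallback branch above."""
    if residual is not None:
        hidden_states = hidden_states + residual
    residual = hidden_states

    x = hidden_states.to(torch.float32)
    variance = x.pow(2).mean(-1, keepdim=True)
    x = x * torch.rsqrt(variance + eps)

    # Cast back to the weight dtype (half precision in practice).
    return weight * x.to(weight.dtype), residual


# Example
h = torch.randn(2, 8, dtype=torch.float16)
w = torch.ones(8, dtype=torch.float16)
out, res = rms_norm_reference(h, w)
```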
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, SpeculativeHead, ) from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import ( FastRMSNorm, ) def load_attention(config, prefix, weights): bias = config.attention_bias if config.num_attention_heads != config.num_key_value_heads: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=bias, ) else: if config.model_type == "baichuan": return TensorParallelColumnLinear.load_qkv( config, prefix=f"{prefix}.W_pack", weights=weights, bias=bias, ) elif config.model_type == "phi3": return TensorParallelColumnLinear.load_qkv( config, prefix=f"{prefix}.qkv_proj", weights=weights, bias=bias, ) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=bias, ) class FlashLlamaAttention(torch.nn.Module): def __init__( self, prefix: str, config, weights, ): super().__init__() self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.head_size, base=config.rope_theta, device=weights.device, ) self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o_proj", weights=weights, bias=False, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): qkv = self.query_key_value(hidden_states) query, kv = qkv.split( [ self.head_size 
* self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) paged_attention.reshape_and_cache( kv[:, 0], kv[:, 1], kv_cache[0], kv_cache[1], slots ) # output tensor attn_output = torch.empty_like(query) # Prefill if cu_seqlen_prefill is not None: # flash attention flash_attn.attention( query, torch.select(kv, dim=1, index=0), torch.select(kv, dim=1, index=1), attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, ) # Decode else: paged_attention.attention( attn_output, query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class LlamaMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) # Fuse gate and up proj bias = getattr(config, "mlp_bias", False) if config.model_type == "phi3": self.gate_up_proj = TensorParallelColumnLinear.load_gate_up( config, prefix=f"{prefix}.gate_up_proj", weights=weights, bias=bias, ) else: self.gate_up_proj = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"], weights=weights, dim=0, bias=bias, ) self.down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.down_proj", weights=weights, bias=bias, ) self.intermediate_size = ( config.intermediate_size // weights.process_group.size() ) def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) class FlashLlamaLayer(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.self_attn = FlashLlamaAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.mlp = LlamaMLP(prefix=f"{prefix}.mlp", config=config, weights=weights) self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps ) self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, ) # faster post attention rms norm normed_attn_res_output, attn_res = self.post_attention_layernorm( attn_output, res ) mlp_output = self.mlp(normed_attn_res_output) return mlp_output, attn_res class FlashLlamaModel(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.layers = nn.ModuleList( [ FlashLlamaLayer( prefix=( f"model.layers.{layer_id}" if not prefix else f"{prefix}.model.layers.{layer_id}" ), config=config, weights=weights, ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = FastRMSNorm.load( 
prefix="model.norm" if not prefix else f"{prefix}.model.norm", weights=weights, eps=config.rms_norm_eps, ) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = inputs_embeds # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( position_ids, max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, ) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashLlamaForCausalLM(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.embed_tokens = TensorParallelEmbedding( prefix=( "model.embed_tokens" if not prefix else f"{prefix}.model.embed_tokens" ), weights=weights, ) self.model = FlashLlamaModel(prefix, config, weights) if config.tie_word_embeddings: suffix = "model.embed_tokens" else: suffix = "lm_head" self.lm_head = SpeculativeHead.load( config, prefix=suffix if not prefix else f"{prefix}.suffix", weights=weights, ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, prefill_cache_indices: Optional[torch.Tensor] = None, lm_head_indices: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: inputs_embeds = self.embed_tokens(input_ids) hidden_states = self.model( inputs_embeds, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, true_max_s=max_s, prefill_cache_indices=prefill_cache_indices, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.lm_head(hidden_states) return logits, speculative_logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_llama_modeling.py", "repo_id": "text-generation-inference", "token_count": 6754 }
418
# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Llava-NeXT model.""" from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.image_processing_utils import select_best_resolution from text_generation_server.models.custom_modeling.vlm import ( load_text_model, load_vision_model, ) from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelRowLinear, ) def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): """ Calculate the shape of the image patch grid after the preprocessing for images of any resolution. Args: image_size (`tuple`): The size of the input image in the format (width, height). grid_pinpoints (`List`): A list containing possible resolutions. Each item in the list should be a tuple or list of the form `(height, width)`. patch_size (`int`): The size of each image patch. Returns: tuple: The shape of the image patch grid in the format (width, height). """ if not isinstance(grid_pinpoints, list): raise ValueError("grid_pinpoints should be a list of tuples or lists") height, width = select_best_resolution(image_size, grid_pinpoints) return height // patch_size, width // patch_size def unpad_image(tensor, original_size): """ Unpads a PyTorch tensor of a padded and resized image. Args: tensor (`torch.Tensor`): The image tensor, assumed to be of shape (num_channels, height, width). original_size (`tuple`): The original size of the image (height, width). Returns: `torch.Tensor`: The unpadded image tensor. 
""" original_height, original_width = original_size current_height, current_width = tensor.shape[1:] original_aspect_ratio = original_width / original_height current_aspect_ratio = current_width / current_height if original_aspect_ratio > current_aspect_ratio: scale_factor = current_width / original_width new_height = int(original_height * scale_factor) padding = (current_height - new_height) // 2 unpadded_tensor = tensor[:, padding : current_height - padding, :] else: scale_factor = current_height / original_height new_width = int(original_width * scale_factor) padding = (current_width - new_width) // 2 unpadded_tensor = tensor[:, :, padding : current_width - padding] return unpadded_tensor # Copied from transformers.models.llava.modeling_llava.LlavaMultiModalProjector with Llava->LlavaNext class LlavaNextMultiModalProjector(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.linear_1 = TensorParallelColumnLinear.load( prefix=f"{prefix}.linear_1", config=config, weights=weights, bias=True ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = TensorParallelRowLinear.load( prefix=f"{prefix}.linear_2", config=config, weights=weights, bias=True ) def forward(self, image_features): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class LlavaNextForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() config.vision_config.quantize = config.quantize vision_config = config.vision_config # Instead of selecting in hidden_states[-2]. # Instead compute only the n -2 + 1 layers and don't pool if config.vision_feature_layer < 0: vision_config.num_hidden_layers += config.vision_feature_layer + 1 else: vision_config.num_hidden_layers = config.vision_feature_layer + 1 self.vision_tower = load_vision_model( prefix="vision_tower" if not prefix else f"{prefix}.vision_tower", config=config.vision_config, weights=weights, ) self.multi_modal_projector = LlavaNextMultiModalProjector( prefix="multi_modal_projector", config=config, weights=weights ) self.image_newline = weights.get_tensor("image_newline") self.vocab_size = config.text_config.vocab_size self.config = config config.text_config.quantize = config.quantize config.text_config.use_medusa = config.use_medusa self.language_model = load_text_model( prefix="language_model" if not prefix else f"{prefix}.language_model", config=config.text_config, weights=weights, ) self.pad_token_id = ( config.pad_token_id if config.pad_token_id is not None else -1 ) def _merge_input_ids_with_image_features( self, input_ids: torch.Tensor, inputs_embeds: torch.Tensor, image_features: torch.Tensor, ): """In place merges in vision_embeddings with inputs_embeds.""" mask = input_ids == self.config.image_token_index # Let's pray we have enabled enough slots ! try: inputs_embeds[mask] = image_features.view(-1, image_features.shape[-1]) except Exception as e: raise RuntimeError( f"Cannot fill images right now. If error happens at warmup, make sure you have enough `--max-input-tokens` to handle images. 
If error happens at regular runtime, please fill in an issue: {e}" ) return inputs_embeds def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, pixel_values: torch.FloatTensor = None, # Unused for this model pixel_attention_mask=None, image_sizes: Optional[torch.LongTensor] = None, ): inputs_embeds = self.language_model.embed_tokens(input_ids) if pixel_values is not None and len(pixel_values) > 0: # num_special_image_tokens = (input_ids == self.config.image_token_index).sum() # assert num_special_image_tokens == len(pixel_values), f"Received {num_special_image_tokens} for {len(pixel_values)} images, this is invalid" # 1. Extract the input embeddings # 2. Merge text and images num_images, num_patches, channels, height, width = pixel_values.shape pixel_values = pixel_values.view( num_images * num_patches, channels, height, width ) image_features = self.vision_tower(pixel_values) # selected_image_feature = image_features.hidden_states[self.config.vision_feature_layer] # Already done within the clip model selected_image_feature = image_features.last_hidden_state if self.config.vision_feature_select_strategy == "default": selected_image_feature = selected_image_feature[:, 1:] elif self.config.vision_feature_select_strategy == "full": selected_image_feature = selected_image_feature else: raise RuntimeError( f"Strategy `{self.config.vision_feature_select_strategy}` is not supported/valid." ) image_features = self.multi_modal_projector(selected_image_feature) # split up image_features for each of the individual images # hence we get a list of image_features, each of shape (5, num_patches, hidden_size) # if we assume each image has 5 image features (base image + 4 patches) split_sizes = [num_patches] * num_images image_features = torch.split(image_features, split_sizes, dim=0) # NOTE we only support multimodal_patch_merge_type == "spatial_unpad" height = width = ( self.config.vision_config.image_size // self.config.vision_config.patch_size ) new_image_features = [] for image_idx, image_feature in enumerate(image_features): if image_feature.shape[0] > 1: base_image_feature = image_feature[0] image_feature = image_feature[1:] if height * width != base_image_feature.shape[0]: raise ValueError( "The number of patches is not consistent with the image size." 
) num_patch_height, num_patch_width = get_anyres_image_grid_shape( image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size, ) image_feature = image_feature.view( num_patch_height, num_patch_width, height, width, -1 ) image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() image_feature = image_feature.flatten(1, 2).flatten(2, 3) image_feature = unpad_image(image_feature, image_sizes[image_idx]) image_feature = torch.cat( ( image_feature, self.image_newline[:, None, None].expand( *image_feature.shape[:-1], 1 ), ), dim=-1, ) image_feature = image_feature.flatten(1, 2).transpose(0, 1) image_feature = torch.cat( (base_image_feature, image_feature), dim=0 ) else: image_feature = image_feature[0] image_feature = torch.cat( (image_feature, self.image_newline[None]), dim=0 ) new_image_features.append(image_feature) image_features = torch.stack(new_image_features, dim=0) inputs_embeds = self._merge_input_ids_with_image_features( input_ids, inputs_embeds, image_features ) hidden_states = self.language_model.model( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, input_lengths=input_lengths, max_s=max_s, true_max_s=max_s, prefill_cache_indices=None, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.language_model.lm_head(hidden_states) return logits, speculative_logits
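
# --- Illustrative sketch (added for clarity; not part of the upstream file) ---
# `unpad_image` above strips the symmetric padding introduced when an image was
# letterboxed into a square feature map. The guarded demo uses arbitrary sizes:
# a 336x336 map that came from a 300x400 (height x width) image loses 42 rows
# of padding at the top and bottom.
if __name__ == "__main__":
    _features = torch.zeros(3, 336, 336)
    _unpadded = unpad_image(_features, original_size=(300, 400))
    assert _unpadded.shape == (3, 252, 336)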
text-generation-inference/server/text_generation_server/models/custom_modeling/llava_next.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/llava_next.py", "repo_id": "text-generation-inference", "token_count": 5295 }
419
import torch import torch.distributed from opentelemetry import trace from transformers import AutoConfig, AutoTokenizer from typing import Optional from text_generation_server.models import FlashCausalLM from text_generation_server.models.custom_modeling.flash_phi_modeling import ( FlashPhiForCausalLM, PhiConfig, ) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) tracer = trace.get_tracer(__name__) class FlashPhi(FlashCausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = torch.float16 if dtype is None else dtype else: raise NotImplementedError("FlashPhi is only available on GPU") tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) config = PhiConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) config.quantize = quantize config.use_medusa = use_medusa torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) if config.quantize in ["gptq", "awq"]: weights._set_gptq_params(model_id, revision) model = FlashPhiForCausalLM(config, weights) if use_medusa: from text_generation_server.utils.medusa import MedusaModel from huggingface_hub import hf_hub_download import json import os from pathlib import Path is_local_model = ( Path(use_medusa).exists() and Path(use_medusa).is_dir() ) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None if not is_local_model: medusa_config = hf_hub_download( use_medusa, revision=revision, filename="config.json" ) medusa_head = hf_hub_download( use_medusa, revision=revision, filename="medusa_lm_head.pt" ) else: medusa_config = str(Path(use_medusa) / "config.json") medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt") with open(medusa_config, "r") as f: config = json.load(f) medusa_sf = medusa_head[: -len(".pt")] + ".safetensors" weights = Weights( [medusa_sf], device, dtype, process_group=self.process_group ) lm_head = model.lm_head model.lm_head = MedusaModel(config, weights, lm_head) torch.distributed.barrier(group=self.process_group) super(FlashPhi, self).__init__( model=model, tokenizer=tokenizer, num_layers=len(model.model.layers), num_kv_heads=model.model.num_key_value_heads, head_size=model.model.head_size, dtype=dtype, device=device, rank=rank, world_size=world_size, )
text-generation-inference/server/text_generation_server/models/flash_phi.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/flash_phi.py", "repo_id": "text-generation-inference", "token_count": 1738 }
420
import torch import torch.distributed from transformers import AutoConfig, AutoTokenizer from typing import Optional, List, Tuple from text_generation_server.models import CausalLM from text_generation_server.models.custom_modeling.phi_modeling import ( PhiConfig, PhiForCausalLM, ) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) class Phi(CausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, _rank, _world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device("cuda") dtype = torch.float16 if dtype is None else dtype else: if quantize: raise ValueError("quantization is not available on CPU") device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) config = PhiConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) tokenizer.bos_token_id = config.bos_token_id tokenizer.eos_token_id = config.eos_token_id tokenizer.pad_token = tokenizer.eos_token config.quantize = quantize config.use_medusa = use_medusa torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) model = PhiForCausalLM(config, weights) torch.distributed.barrier(group=self.process_group) super(CausalLM, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, )
text-generation-inference/server/text_generation_server/models/phi.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/phi.py", "repo_id": "text-generation-inference", "token_count": 1000 }
421
from functools import lru_cache


@lru_cache(10)
def log_once(log, msg: str):
    # Memoized on the (log, msg) pair: an identical message is only emitted
    # once per logger, and at most 10 distinct pairs are remembered.
    log(msg)
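
# --- Illustrative sketch (added for clarity; not part of the upstream file) ---
# Because of the lru_cache above, repeated calls with the same (log, msg) pair
# only forward the message once. The names below are made up for the demo.
if __name__ == "__main__":
    _calls = []

    def _fake_log(message):
        _calls.append(message)

    for _ in range(3):
        log_once(_fake_log, "weights were converted to safetensors")
    assert _calls == ["weights were converted to safetensors"]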
text-generation-inference/server/text_generation_server/utils/log.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/log.py", "repo_id": "text-generation-inference", "token_count": 40 }
422
target .yarn
tokenizers/bindings/node/.prettierignore/0
{ "file_path": "tokenizers/bindings/node/.prettierignore", "repo_id": "tokenizers", "token_count": 5 }
423
{ "name": "tokenizers-darwin-x64", "version": "0.13.4-rc1", "os": [ "darwin" ], "cpu": [ "x64" ], "main": "tokenizers.darwin-x64.node", "files": [ "tokenizers.darwin-x64.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers" }
tokenizers/bindings/node/npm/darwin-x64/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/darwin-x64/package.json", "repo_id": "tokenizers", "token_count": 268 }
424
{ "name": "tokenizers-win32-ia32-msvc", "version": "0.13.4-rc1", "os": [ "win32" ], "cpu": [ "ia32" ], "main": "tokenizers.win32-ia32-msvc.node", "files": [ "tokenizers.win32-ia32-msvc.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers" }
tokenizers/bindings/node/npm/win32-ia32-msvc/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/win32-ia32-msvc/package.json", "repo_id": "tokenizers", "token_count": 277 }
425
use crate::decoders::Decoder; use crate::encoding::{JsEncoding, JsTruncationDirection, JsTruncationStrategy}; use crate::models::Model; use crate::normalizers::Normalizer; use crate::pre_tokenizers::PreTokenizer; use crate::processors::Processor; use crate::tasks::tokenizer::{DecodeBatchTask, DecodeTask, EncodeBatchTask, EncodeTask}; use crate::trainers::Trainer; use std::collections::HashMap; use tokenizers::Model as ModelTrait; use napi::bindgen_prelude::*; use napi_derive::napi; use std::sync::{Arc, RwLock}; use tokenizers as tk; #[napi] #[derive(Default)] pub enum PaddingDirection { #[default] Left, Right, } impl From<PaddingDirection> for tk::PaddingDirection { fn from(w: PaddingDirection) -> Self { match w { PaddingDirection::Left => tk::PaddingDirection::Left, PaddingDirection::Right => tk::PaddingDirection::Right, } } } impl TryFrom<String> for PaddingDirection { type Error = Error; fn try_from(w: String) -> Result<Self> { match w.as_str() { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), s => Err(Error::from_reason(format!( "{s:?} is not a valid direction" ))), } } } #[napi(object)] #[derive(Default)] pub struct PaddingOptions { pub max_length: Option<u32>, pub direction: Option<Either<String, PaddingDirection>>, pub pad_to_multiple_of: Option<u32>, pub pad_id: Option<u32>, pub pad_type_id: Option<u32>, pub pad_token: Option<String>, } impl TryFrom<PaddingOptions> for tk::PaddingParams { type Error = Error; fn try_from(value: PaddingOptions) -> Result<Self> { let direction = match value.direction { Some(either) => match either { Either::A(string) => { let direction: PaddingDirection = string.try_into()?; direction.into() } Either::B(direction) => direction.into(), }, None => tk::PaddingDirection::Right, }; Ok(Self { pad_to_multiple_of: value.pad_to_multiple_of.map(|s| s as usize), pad_id: value.pad_id.unwrap_or_default(), pad_type_id: value.pad_type_id.unwrap_or_default(), pad_token: value.pad_token.unwrap_or("[PAD]".to_string()), direction, strategy: match value.max_length { Some(length) => tk::PaddingStrategy::Fixed(length as usize), None => tk::PaddingStrategy::BatchLongest, }, }) } } #[napi(object)] #[derive(Default)] pub struct EncodeOptions { pub is_pretokenized: Option<bool>, pub add_special_tokens: Option<bool>, } #[derive(Default)] struct EncodeOptionsDef { // TODO // is_pretokenized: bool, add_special_tokens: bool, } impl From<EncodeOptions> for EncodeOptionsDef { fn from(value: EncodeOptions) -> Self { EncodeOptionsDef { // TODO // is_pretokenized: value.is_pretokenized.unwrap_or(false), add_special_tokens: value.add_special_tokens.unwrap_or(true), } } } #[napi(object)] #[derive(Default)] pub struct TruncationOptions { pub max_length: Option<u32>, pub strategy: Option<JsTruncationStrategy>, pub direction: Option<Either<String, JsTruncationDirection>>, pub stride: Option<u32>, } impl TryFrom<TruncationOptions> for tk::TruncationParams { type Error = Error; fn try_from(value: TruncationOptions) -> Result<Self> { let direction = match value.direction { Some(either) => match either { Either::A(string) => { let direction: JsTruncationDirection = string.try_into()?; direction.into() } Either::B(direction) => direction.into(), }, None => Default::default(), }; Ok(Self { max_length: value.max_length.unwrap_or(0) as usize, strategy: value.strategy.map(|s| s.into()).unwrap_or_default(), direction, stride: value.stride.unwrap_or_default() as usize, }) } } #[napi(object)] pub struct AddedTokenOptions { pub single_word: Option<bool>, pub left_strip: 
Option<bool>, pub right_strip: Option<bool>, pub normalized: Option<bool>, } #[napi] #[derive(Clone)] pub struct AddedToken { token: tk::AddedToken, } #[napi] impl AddedToken { #[napi(constructor)] pub fn from(token: String, is_special: bool, options: Option<AddedTokenOptions>) -> Self { let mut token = tk::AddedToken::from(token, is_special); if let Some(options) = options { if let Some(sw) = options.single_word { token = token.single_word(sw); } if let Some(ls) = options.left_strip { token = token.lstrip(ls); } if let Some(rs) = options.right_strip { token = token.rstrip(rs); } if let Some(n) = options.normalized { token = token.normalized(n); } } Self { token } } #[napi] pub fn get_content(&self) -> String { self.token.content.clone() } } impl From<AddedToken> for tk::AddedToken { fn from(v: AddedToken) -> Self { v.token } } type RsTokenizer = tk::TokenizerImpl<Model, Normalizer, PreTokenizer, Processor, Decoder>; #[napi] #[derive(Clone)] pub struct Tokenizer { pub(crate) tokenizer: Arc<RwLock<RsTokenizer>>, } #[napi] impl Tokenizer { #[napi(constructor)] pub fn new(model: &Model) -> Self { Self { tokenizer: Arc::new(RwLock::new(tk::TokenizerImpl::new((*model).clone()))), } } #[napi] pub fn set_pre_tokenizer(&mut self, pre_tokenizer: &PreTokenizer) { self .tokenizer .write() .unwrap() .with_pre_tokenizer((*pre_tokenizer).clone()); } #[napi] pub fn set_decoder(&mut self, decoder: &Decoder) { self .tokenizer .write() .unwrap() .with_decoder((*decoder).clone()); } #[napi] pub fn set_model(&mut self, model: &Model) { self.tokenizer.write().unwrap().with_model((*model).clone()); } #[napi] pub fn set_post_processor(&mut self, post_processor: &Processor) { self .tokenizer .write() .unwrap() .with_post_processor((*post_processor).clone()); } #[napi] pub fn set_normalizer(&mut self, normalizer: &Normalizer) { self .tokenizer .write() .unwrap() .with_normalizer((*normalizer).clone()); } #[napi] pub fn save(&self, path: String, pretty: Option<bool>) -> Result<()> { let pretty = pretty.unwrap_or(false); self .tokenizer .read() .unwrap() .save(path, pretty) .map_err(|e| Error::from_reason(format!("{}", e))) } #[napi] pub fn add_added_tokens(&mut self, tokens: Vec<&AddedToken>) -> u32 { let tokens: Vec<_> = tokens .into_iter() .map(|tok| (*tok).clone().into()) .collect(); self.tokenizer.write().unwrap().add_tokens(&tokens) as u32 } #[napi] pub fn add_tokens(&mut self, tokens: Vec<String>) -> u32 { let tokens: Vec<_> = tokens .into_iter() .map(|tok| tk::AddedToken::from(tok, false)) .collect(); self.tokenizer.write().unwrap().add_tokens(&tokens) as u32 } #[napi(ts_return_type = "Promise<JsEncoding>")] pub fn encode( &self, #[napi(ts_arg_type = "InputSequence")] sentence: String, #[napi(ts_arg_type = "InputSequence | null")] pair: Option<String>, encode_options: Option<EncodeOptions>, ) -> AsyncTask<EncodeTask<'static>> { let options: EncodeOptionsDef = encode_options.unwrap_or_default().into(); let input: tk::EncodeInput = match pair { Some(pair) => (sentence, pair).into(), None => sentence.into(), }; AsyncTask::new(EncodeTask { tokenizer: (*self).clone(), input: Some(input), add_special_tokens: options.add_special_tokens, }) } #[napi(ts_return_type = "Promise<JsEncoding[]>")] pub fn encode_batch( &self, #[napi(ts_arg_type = "EncodeInput[]")] sentences: Vec<String>, encode_options: Option<EncodeOptions>, ) -> AsyncTask<EncodeBatchTask<'static>> { let options: EncodeOptionsDef = encode_options.unwrap_or_default().into(); let inputs: Vec<tk::EncodeInput> = sentences .into_iter() .map(|sentence| 
sentence.into()) .collect(); AsyncTask::new(EncodeBatchTask { tokenizer: (*self).clone(), inputs: Some(inputs), add_special_tokens: options.add_special_tokens, }) } #[napi(ts_return_type = "Promise<string>")] pub fn decode(&self, ids: Vec<u32>, skip_special_tokens: bool) -> AsyncTask<DecodeTask> { AsyncTask::new(DecodeTask { tokenizer: (*self).clone(), ids, skip_special_tokens, }) } #[napi(ts_return_type = "Promise<string[]>")] pub fn decode_batch( &self, ids: Vec<Vec<u32>>, skip_special_tokens: bool, ) -> AsyncTask<DecodeBatchTask> { AsyncTask::new(DecodeBatchTask { tokenizer: (*self).clone(), ids, skip_special_tokens, }) } #[napi(factory)] pub fn from_string(s: String) -> Result<Self> { let tokenizer: tk::tokenizer::TokenizerImpl< Model, Normalizer, PreTokenizer, Processor, Decoder, > = s .parse() .map_err(|e| Error::from_reason(format!("{}", e)))?; Ok(Self { tokenizer: Arc::new(RwLock::new(tokenizer)), }) } #[napi(factory)] pub fn from_file(file: String) -> Result<Self> { let tokenizer = tk::tokenizer::TokenizerImpl::from_file(file) .map_err(|e| Error::from_reason(format!("Error loading from file{}", e)))?; Ok(Self { tokenizer: Arc::new(RwLock::new(tokenizer)), }) } #[napi] pub fn add_special_tokens(&mut self, tokens: Vec<String>) { let tokens: Vec<_> = tokens .into_iter() .map(|s| tk::AddedToken::from(s, true)) .collect(); self.tokenizer.write().unwrap().add_special_tokens(&tokens); } #[napi] pub fn set_truncation( &mut self, max_length: u32, options: Option<TruncationOptions>, ) -> Result<()> { let mut options: tk::TruncationParams = if let Some(options) = options { options.try_into()? } else { Default::default() }; options.max_length = max_length as usize; self .tokenizer .write() .unwrap() .with_truncation(Some(options)) .unwrap(); Ok(()) } #[napi] pub fn disable_truncation(&mut self) { self .tokenizer .write() .unwrap() .with_truncation(None) .unwrap(); } #[napi] pub fn set_padding(&mut self, options: Option<PaddingOptions>) -> Result<()> { let options = if let Some(options) = options { Some(options.try_into()?) 
} else { None }; self.tokenizer.write().unwrap().with_padding(options); Ok(()) } #[napi] pub fn disable_padding(&mut self) { self.tokenizer.write().unwrap().with_padding(None); } #[napi] pub fn get_decoder(&self) -> Option<Decoder> { self.tokenizer.read().unwrap().get_decoder().cloned() } #[napi] pub fn get_normalizer(&self) -> Option<Normalizer> { self.tokenizer.read().unwrap().get_normalizer().cloned() } #[napi] pub fn get_pre_tokenizer(&self) -> Option<PreTokenizer> { self.tokenizer.read().unwrap().get_pre_tokenizer().cloned() } #[napi] pub fn get_post_processor(&self) -> Option<Processor> { self.tokenizer.read().unwrap().get_post_processor().cloned() } #[napi] pub fn get_vocab(&self, with_added_tokens: Option<bool>) -> HashMap<String, u32> { let with_added_tokens = with_added_tokens.unwrap_or(true); self.tokenizer.read().unwrap().get_vocab(with_added_tokens) } #[napi] pub fn get_vocab_size(&self, with_added_tokens: Option<bool>) -> u32 { self.get_vocab(with_added_tokens).len() as u32 } #[napi] pub fn id_to_token(&self, id: u32) -> Option<String> { self.tokenizer.read().unwrap().id_to_token(id) } #[napi] pub fn token_to_id(&self, token: String) -> Option<u32> { self.tokenizer.read().unwrap().token_to_id(&token) } #[napi] pub fn train(&mut self, files: Vec<String>) -> Result<()> { let mut trainer: Trainer = self .tokenizer .read() .unwrap() .get_model() .model .as_ref() .unwrap() .read() .unwrap() .get_trainer() .into(); self .tokenizer .write() .unwrap() .train_from_files(&mut trainer, files) .map_err(|e| Error::from_reason(format!("{}", e)))?; Ok(()) } #[napi] pub fn running_tasks(&self) -> u32 { std::sync::Arc::strong_count(&self.tokenizer) as u32 } #[napi] pub fn post_process( &self, encoding: &JsEncoding, pair: Option<&JsEncoding>, add_special_tokens: Option<bool>, ) -> Result<JsEncoding> { let add_special_tokens = add_special_tokens.unwrap_or(true); Ok( self .tokenizer .read() .unwrap() .post_process( (*encoding).clone().try_into()?, if let Some(pair) = pair { Some((*pair).clone().try_into()?) } else { None }, add_special_tokens, ) .map_err(|e| Error::from_reason(format!("{}", e)))? .into(), ) } } #[napi(object)] #[derive(Default)] pub struct JsFromPretrainedParameters { pub revision: Option<String>, pub auth_token: Option<String>, }
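
// --- Illustrative note (added for clarity; not part of the upstream file) ---
// PaddingOptions and TruncationOptions above are plain JS objects converted
// into tk::PaddingParams / tk::TruncationParams: an absent `direction`
// defaults to Right, an absent `pad_token` to "[PAD]", and `set_truncation`'s
// `max_length` argument overrides whatever the options carry. Assuming
// napi-rs' default camelCase mapping for the #[napi] methods (not verified
// against the generated .d.ts), the JS side would look roughly like:
//
//   tokenizer.setPadding({ padId: 3, padToken: "[PAD]", maxLength: 128 });
//   tokenizer.setTruncation(128, { stride: 16 });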
tokenizers/bindings/node/src/tokenizer.rs/0
{ "file_path": "tokenizers/bindings/node/src/tokenizer.rs", "repo_id": "tokenizers", "token_count": 5701 }
426
import argparse import glob from tokenizers import BertWordPieceTokenizer parser = argparse.ArgumentParser() parser.add_argument( "--files", default=None, metavar="path", type=str, required=True, help="The files to use as training; accept '**/*.txt' type of patterns \ if enclosed in quotes", ) parser.add_argument( "--out", default="./", type=str, help="Path to the output directory, where the files will be saved", ) parser.add_argument("--name", default="bert-wordpiece", type=str, help="The name of the output vocab files") args = parser.parse_args() files = glob.glob(args.files) if not files: print(f"File does not exist: {args.files}") exit(1) # Initialize an empty tokenizer tokenizer = BertWordPieceTokenizer( clean_text=True, handle_chinese_chars=True, strip_accents=True, lowercase=True, ) # And then train tokenizer.train( files, vocab_size=10000, min_frequency=2, show_progress=True, special_tokens=["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], limit_alphabet=1000, wordpieces_prefix="##", ) # Save the files tokenizer.save_model(args.out, args.name)
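
# --- Illustrative usage (added for clarity; not part of the upstream file) ---
# A hypothetical invocation of this script (paths and names are made up):
#
#   python train_bert_wordpiece.py --files "data/**/*.txt" --out ./vocab --name my-bert
#
# The quotes keep the shell from expanding the glob, matching the help text
# above; the trained vocabulary should end up as a `my-bert`-prefixed vocab
# file in ./vocab via `tokenizer.save_model`.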
tokenizers/bindings/python/examples/train_bert_wordpiece.py/0
{ "file_path": "tokenizers/bindings/python/examples/train_bert_wordpiece.py", "repo_id": "tokenizers", "token_count": 472 }
427
# Generated content DO NOT EDIT class Model: """ Base class for all models The model represents the actual tokenization algorithm. This is the part that will contain and manage the learned vocabulary. This class cannot be constructed directly. Please use one of the concrete models. """ def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class BPE(Model): """ An implementation of the BPE (Byte-Pair Encoding) algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` merges (:obj:`List[Tuple[str, str]]`, `optional`): A list of pairs of tokens (:obj:`Tuple[str, str]`) :obj:`[("a", "b"),...]` cache_capacity (:obj:`int`, `optional`): The number of words that the BPE cache can contain. The cache allows to speed-up the process by keeping the result of the merge operations for a number of words. dropout (:obj:`float`, `optional`): A float between 0 and 1 that represents the BPE dropout to use. unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. continuing_subword_prefix (:obj:`str`, `optional`): The prefix to attach to subword units that don't represent a beginning of word. end_of_word_suffix (:obj:`str`, `optional`): The suffix to attach to subword units that represent an end of word. fuse_unk (:obj:`bool`, `optional`): Whether to fuse any subsequent unknown tokens into a single one byte_fallback (:obj:`bool`, `optional`): Whether to use spm byte-fallback trick (defaults to False) ignore_merges (:obj:`bool`, `optional`): Whether or not to match tokens with the vocab before using merges. """ def __init__( self, vocab=None, merges=None, cache_capacity=None, dropout=None, unk_token=None, continuing_subword_prefix=None, end_of_word_suffix=None, fuse_unk=None, byte_fallback=False, ignore_merges=False, ): pass @staticmethod def from_file(cls, vocab, merge, **kwargs): """ Instantiate a BPE model from the given files. 
This method is roughly equivalent to doing:: vocab, merges = BPE.read_file(vocab_filename, merges_filename) bpe = BPE(vocab, merges) If you don't need to keep the :obj:`vocab, merges` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.BPE.read_file` to initialize a :class:`~tokenizers.models.BPE` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: :class:`~tokenizers.models.BPE`: An instance of BPE loaded from these files """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(self, vocab, merges): """ Read a :obj:`vocab.json` and a :obj:`merges.txt` files This method provides a way to read and parse the content of these files, returning the relevant data structures. If you want to instantiate some BPE models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file merges (:obj:`str`): The path to a :obj:`merges.txt` file Returns: A :obj:`Tuple` with the vocab and the merges: The vocabulary and merges loaded into memory """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class Unigram(Model): """ An implementation of the Unigram algorithm Args: vocab (:obj:`List[Tuple[str, float]]`, `optional`, `optional`): A list of vocabulary items and their relative score [("am", -0.2442),...] """ def __init__(self, vocab, unk_id, byte_fallback): pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordLevel(Model): """ An implementation of the WordLevel algorithm Most simple tokenizer model based on mapping tokens to their corresponding id. Args: vocab (:obj:`str`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. """ def __init__(self, vocab, unk_token): pass @staticmethod def from_file(vocab, unk_token): """ Instantiate a WordLevel model from the given file This method is roughly equivalent to doing:: vocab = WordLevel.read_file(vocab_filename) wordlevel = WordLevel(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordLevel.read_file` to initialize a :class:`~tokenizers.models.WordLevel` Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :class:`~tokenizers.models.WordLevel`: An instance of WordLevel loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.json` This method provides a way to read and parse the content of a vocabulary file, returning the relevant data structures. If you want to instantiate some WordLevel models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.json` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. 
Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass class WordPiece(Model): """ An implementation of the WordPiece algorithm Args: vocab (:obj:`Dict[str, int]`, `optional`): A dictionnary of string keys and their ids :obj:`{"am": 0,...}` unk_token (:obj:`str`, `optional`): The unknown token to be used by the model. max_input_chars_per_word (:obj:`int`, `optional`): The maximum number of characters to authorize in a single word. """ def __init__(self, vocab, unk_token, max_input_chars_per_word): pass @staticmethod def from_file(vocab, **kwargs): """ Instantiate a WordPiece model from the given file This method is roughly equivalent to doing:: vocab = WordPiece.read_file(vocab_filename) wordpiece = WordPiece(vocab) If you don't need to keep the :obj:`vocab` values lying around, this method is more optimized than manually calling :meth:`~tokenizers.models.WordPiece.read_file` to initialize a :class:`~tokenizers.models.WordPiece` Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :class:`~tokenizers.models.WordPiece`: An instance of WordPiece loaded from file """ pass def get_trainer(self): """ Get the associated :class:`~tokenizers.trainers.Trainer` Retrieve the :class:`~tokenizers.trainers.Trainer` associated to this :class:`~tokenizers.models.Model`. Returns: :class:`~tokenizers.trainers.Trainer`: The Trainer used to train this model """ pass def id_to_token(self, id): """ Get the token associated to an ID Args: id (:obj:`int`): An ID to convert to a token Returns: :obj:`str`: The token associated to the ID """ pass @staticmethod def read_file(vocab): """ Read a :obj:`vocab.txt` file This method provides a way to read and parse the content of a standard `vocab.txt` file as used by the WordPiece Model, returning the relevant data structures. If you want to instantiate some WordPiece models from memory, this method gives you the expected input from the standard files. Args: vocab (:obj:`str`): The path to a :obj:`vocab.txt` file Returns: :obj:`Dict[str, int]`: The vocabulary as a :obj:`dict` """ pass def save(self, folder, prefix): """ Save the current model Save the current model in the given folder, using the given prefix for the various files that will get created. Any file with the same name that already exists in this folder will be overwritten. Args: folder (:obj:`str`): The path to the target folder in which to save the various files prefix (:obj:`str`, `optional`): An optional prefix, used to prefix each file name Returns: :obj:`List[str]`: The list of saved files """ pass def token_to_id(self, tokens): """ Get the ID associated to a token Args: token (:obj:`str`): A token to convert to an ID Returns: :obj:`int`: The ID associated to the token """ pass def tokenize(self, sequence): """ Tokenize a sequence Args: sequence (:obj:`str`): A sequence to tokenize Returns: A :obj:`List` of :class:`~tokenizers.Token`: The generated tokens """ pass
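
# --- Illustrative usage sketch (added for clarity; not part of the generated stub) ---
# The stubs above only document the API. A minimal in-memory construction,
# with a tiny hand-written vocab/merges pair made up for the example:
#
#   from tokenizers.models import BPE
#   bpe = BPE(vocab={"a": 0, "b": 1, "ab": 2}, merges=[("a", "b")], unk_token="[UNK]")
#   # bpe.tokenize("ab") should merge the two characters into a single "ab" token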
tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/models/__init__.pyi", "repo_id": "tokenizers", "token_count": 7632 }
428
import tokenizers from argparse import ArgumentParser import sentencepiece as spm from collections import Counter import json import os import datetime try: from termcolor import colored has_color = True except Exception: has_color = False def main(): parser = ArgumentParser("SentencePiece parity checker") parser.add_argument( "--input-file", "-i", type=str, required=True, help="Which files do you want to train from", ) parser.add_argument( "--model-file", "-m", type=str, required=False, default=None, help="Use a pretrained token file", ) parser.add_argument( "--model-prefix", type=str, default="spm_parity", help="Model prefix for spm_train", ) parser.add_argument( "--vocab-size", "-v", type=int, default=8000, help="Vocab size for spm_train", ) parser.add_argument( "--verbose", action="store_true", help="Verbosity", ) parser.add_argument( "--train", action="store_true", help="Instead of checking the encoder part, we check the trainer part", ) parser.add_argument( "--from-spm", action="store_true", help="Directly load the spm file with it's own normalizer", ) args = parser.parse_args() trained = False if args.model_file is None: spm.SentencePieceTrainer.Train( f"--input={args.input_file} --model_prefix={args.model_prefix}" f" --character_coverage=1.0" f" --max_sentence_length=40000" f" --num_threads=1" f" --vocab_size={args.vocab_size}" ) trained = True args.model_file = f"{args.model_prefix}.model" try: if args.train: check_train(args) else: check_encode(args) finally: if trained: os.remove(f"{args.model_prefix}.model") os.remove(f"{args.model_prefix}.vocab") def check_train(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) tokenizer = tokenizers.SentencePieceUnigramTokenizer() tokenizer.train(args.input_file, show_progress=False) spm_tokens = 0 tokenizer_tokens = 0 with open(args.input_file, "r") as f: for i, line in enumerate(f): line = line.strip() ids = sp.EncodeAsIds(line) encoded = tokenizer.encode(line) spm_tokens += len(ids) tokenizer_tokens += len(encoded.ids) vocab = [0 for i in range(args.vocab_size)] spm_vocab = [0 for i in range(args.vocab_size)] for token, index in tokenizer.get_vocab().items(): vocab[index] = token for i in range(args.vocab_size): spm_vocab[i] = sp.id_to_piece(i) # 0 is unk in tokenizers, 0, 1, 2 are unk bos, eos in spm by default. for i, (token, spm_token) in enumerate(zip(vocab[1:], spm_vocab[3:])): if token != spm_token: print(f"First different token is token {i} ({token} != {spm_token})") break print(f"Tokenizer used {tokenizer_tokens}, where spm used {spm_tokens}") assert tokenizer_tokens < spm_tokens, "Our trainer should be at least more efficient than the SPM one" print("Ok our trainer is at least more efficient than the SPM one") def check_diff(spm_diff, tok_diff, sp, tok): if spm_diff == list(reversed(tok_diff)): # AAA -> AA+A vs A+AA case. return True elif len(spm_diff) == len(tok_diff) and tok.decode(spm_diff) == tok.decode(tok_diff): # Second order OK # Barrich -> Barr + ich vs Bar + rich return True spm_reencoded = sp.encode(sp.decode(spm_diff)) tok_reencoded = tok.encode(tok.decode(spm_diff)).ids if spm_reencoded != spm_diff and spm_reencoded == tok_reencoded: # Type 3 error. # Snehagatha -> # Sne, h, aga, th, a # Sne, ha, gat, ha # Encoding the wrong with sp does not even recover what spm gave us # It fits tokenizer however... 
return True return False def check_details(line, spm_ids, tok_ids, sp, tok): # Encoding can be the same with same result AAA -> A + AA vs AA + A # We can check that we use at least exactly the same number of tokens. for i, (spm_id, tok_id) in enumerate(zip(spm_ids, tok_ids)): if spm_id != tok_id: break first = i for i, (spm_id, tok_id) in enumerate(zip(reversed(spm_ids), reversed(tok_ids))): if spm_id != tok_id: break last = len(spm_ids) - i spm_diff = spm_ids[first:last] tok_diff = tok_ids[first:last] if check_diff(spm_diff, tok_diff, sp, tok): return True if last - first > 5: # We might have twice a single problem, attempt to subdivide the disjointed tokens into smaller problems spms = Counter(spm_ids[first:last]) toks = Counter(tok_ids[first:last]) removable_tokens = {spm_ for (spm_, si) in spms.items() if toks.get(spm_, 0) == si} min_width = 3 for i in range(last - first - min_width): if all(spm_ids[first + i + j] in removable_tokens for j in range(min_width)): possible_matches = [ k for k in range(last - first - min_width) if tok_ids[first + k : first + k + min_width] == spm_ids[first + i : first + i + min_width] ] for j in possible_matches: if check_diff(spm_ids[first : first + i], tok_ids[first : first + j], sp, tok) and check_details( line, spm_ids[first + i : last], tok_ids[first + j : last], sp, tok, ): return True print(f"Spm: {[tok.decode([spm_ids[i]]) for i in range(first, last)]}") try: print(f"Tok: {[tok.decode([tok_ids[i]]) for i in range(first, last)]}") except Exception: pass ok_start = tok.decode(spm_ids[:first]) ok_end = tok.decode(spm_ids[last:]) wrong = tok.decode(spm_ids[first:last]) print() if has_color: print(f"{colored(ok_start, 'grey')}{colored(wrong, 'red')}{colored(ok_end, 'grey')}") else: print(wrong) return False def check_encode(args): sp = spm.SentencePieceProcessor() sp.Load(args.model_file) if args.from_spm: tok = tokenizers.SentencePieceUnigramTokenizer.from_spm(args.model_file) else: vocab = [(sp.id_to_piece(i), sp.get_score(i)) for i in range(sp.piece_size())] unk_id = sp.unk_id() tok = tokenizers.SentencePieceUnigramTokenizer(vocab, unk_id) perfect = 0 imperfect = 0 wrong = 0 now = datetime.datetime.now spm_total_time = datetime.timedelta(seconds=0) tok_total_time = datetime.timedelta(seconds=0) with open(args.input_file, "r", encoding="utf-8-sig") as f: for i, line in enumerate(f): line = line.strip() start = now() ids = sp.EncodeAsIds(line) spm_time = now() encoded = tok.encode(line) tok_time = now() spm_total_time += spm_time - start tok_total_time += tok_time - spm_time if args.verbose: if i % 10000 == 0: print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") print(f"SPM: {spm_total_time} - TOK: {tok_total_time}") if ids != encoded.ids: if check_details(line, ids, encoded.ids, sp, tok): imperfect += 1 continue else: wrong += 1 else: perfect += 1 assert ( ids == encoded.ids ), f"line {i}: {line} : \n\n{ids}\n{encoded.ids}\n{list(zip(encoded.ids, encoded.tokens))}" print(f"({perfect} / {imperfect} / {wrong} ----- {perfect + imperfect + wrong})") total = perfect + imperfect + wrong print(f"Accuracy {perfect * 100 / total:.2f} Slowdown : {tok_total_time/ spm_total_time:.2f}") if __name__ == "__main__": main()
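
# --- Illustrative note (added for clarity; not part of the upstream file) ---
# check_diff above accepts two segmentations as equivalent when they are mirror
# images ("AAA" -> ["AA", "A"] vs ["A", "AA"]), or when they have the same
# length and decode to the same string ("Barrich" -> ["Barr", "ich"] vs
# ["Bar", "rich"]). A purely schematic example with made-up ids, no real model
# involved:
#
#   spm_diff = [10, 7]   # "AA", "A"
#   tok_diff = [7, 10]   # "A", "AA"
#   # spm_diff == list(reversed(tok_diff)) -> counted as imperfect, not wrong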
tokenizers/bindings/python/scripts/spm_parity_check.py/0
{ "file_path": "tokenizers/bindings/python/scripts/spm_parity_check.py", "repo_id": "tokenizers", "token_count": 4110 }
429
use tokenizers as tk; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use super::{ DestroyPtr, PyNormalizedString, PyNormalizedStringRefMut, RefMutContainer, RefMutGuard, }; use crate::encoding::PyEncoding; use crate::error::ToPyResult; use crate::token::PyToken; use tk::{OffsetReferential, OffsetType, Offsets, PreTokenizedString, Token}; fn split(pretok: &mut PreTokenizedString, func: &Bound<'_, PyAny>) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`split` expect a callable with the signature: \ `fn(index: int, normalized: NormalizedString) -> List[NormalizedString]`", )) } else { ToPyResult(pretok.split(|i, normalized| { let output = func.call((i, PyNormalizedString::from(normalized)), None)?; Ok(output .extract::<Vec<PyNormalizedString>>()? .into_iter() .map(tk::NormalizedString::from)) })) .into() } } fn normalize(pretok: &mut PreTokenizedString, func: &Bound<'_, PyAny>) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`normalize` expect a callable with the signature: \ `fn(normalized: NormalizedString)`", )) } else { ToPyResult(pretok.normalize(|normalized| { let norm = PyNormalizedStringRefMut::new(normalized); func.call((norm.get(),), None)?; Ok(()) })) .into() } } fn tokenize(pretok: &mut PreTokenizedString, func: &Bound<'_, PyAny>) -> PyResult<()> { if !func.is_callable() { Err(exceptions::PyTypeError::new_err( "`tokenize` expect a callable with the signature: \ `fn(str) -> List[Token]`", )) } else { ToPyResult(pretok.tokenize(|normalized| { let output = func.call((normalized.get(),), None)?; Ok(output .extract::<&PyList>()? .into_iter() .map(|obj| Ok(Token::from(obj.extract::<PyToken>()?))) .collect::<PyResult<Vec<_>>>()?) })) .into() } } /// This is an enum #[derive(Clone)] pub struct PyOffsetReferential(OffsetReferential); impl FromPyObject<'_> for PyOffsetReferential { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "original" => Ok(OffsetReferential::Original), "normalized" => Ok(OffsetReferential::Normalized), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetReferential, expected one of `original, normalized`", )), }?)) } } #[derive(Clone)] pub struct PyOffsetType(OffsetType); impl FromPyObject<'_> for PyOffsetType { fn extract(obj: &PyAny) -> PyResult<Self> { let s = obj.extract::<&str>()?; Ok(Self(match s { "byte" => Ok(OffsetType::Byte), "char" => Ok(OffsetType::Char), _ => Err(exceptions::PyValueError::new_err( "Wrong value for OffsetType, expected one of `byte, char`", )), }?)) } } type PySplit = (String, Offsets, Option<Vec<PyToken>>); fn get_splits( pretok: &PreTokenizedString, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { pretok .get_splits(offset_referential.0, offset_type.0) .into_iter() .map(|(s, o, t)| { ( s.to_owned(), o, t.as_ref() .map(|tokens| tokens.iter().map(|t| t.clone().into()).collect()), ) }) .collect() } fn to_encoding( pretok: &PreTokenizedString, type_id: u32, word_idx: Option<u32>, ) -> PyResult<PyEncoding> { Ok(ToPyResult( pretok .clone() .into_encoding(word_idx, type_id, tk::OffsetType::Char), ) .into_py()? .into()) } /// PreTokenizedString /// /// Wrapper over a string, that provides a way to normalize, pre-tokenize, tokenize the /// underlying string, while keeping track of the alignment information (offsets). /// /// The PreTokenizedString manages what we call `splits`. 
Each split represents a substring /// which is a subpart of the original string, with the relevant offsets and tokens. /// /// When calling one of the methods used to modify the PreTokenizedString (namely one of /// `split`, `normalize` or `tokenize), only the `splits` that don't have any associated /// tokens will get modified. /// /// Args: /// sequence: str: /// The string sequence used to initialize this PreTokenizedString #[pyclass(module = "tokenizers", name = "PreTokenizedString")] pub struct PyPreTokenizedString { pub(crate) pretok: tk::PreTokenizedString, } impl From<PreTokenizedString> for PyPreTokenizedString { fn from(pretok: PreTokenizedString) -> Self { Self { pretok } } } impl From<PyPreTokenizedString> for PreTokenizedString { fn from(pretok: PyPreTokenizedString) -> Self { pretok.pretok } } #[pymethods] impl PyPreTokenizedString { #[new] #[pyo3(text_signature = "(self, sequence)")] fn new(s: &str) -> Self { PreTokenizedString::from(s).into() } /// Split the PreTokenizedString using the given `func` /// /// Args: /// func: Callable[[index, NormalizedString], List[NormalizedString]]: /// The function used to split each underlying split. /// It is expected to return a list of `NormalizedString`, that represent the new /// splits. If the given `NormalizedString` does not need any splitting, we can /// just return it directly. /// In order for the offsets to be tracked accurately, any returned `NormalizedString` /// should come from calling either `.split` or `.slice` on the received one. #[pyo3(text_signature = "(self, func)")] fn split(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { split(&mut self.pretok, func) } /// Normalize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[NormalizedString], None]: /// The function used to normalize each underlying split. This function /// does not need to return anything, just calling the methods on the provided /// NormalizedString allow its modification. #[pyo3(text_signature = "(self, func)")] fn normalize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { normalize(&mut self.pretok, func) } /// Tokenize each split of the `PreTokenizedString` using the given `func` /// /// Args: /// func: Callable[[str], List[Token]]: /// The function used to tokenize each underlying split. This function must return /// a list of Token generated from the input str. #[pyo3(text_signature = "(self, func)")] fn tokenize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { tokenize(&mut self.pretok, func) } /// Return an Encoding generated from this PreTokenizedString /// /// Args: /// type_id: int = 0: /// The type_id to be used on the generated Encoding. /// /// word_idx: Optional[int] = None: /// An optional word index to be used for each token of this Encoding. If provided, /// all the word indices in the generated Encoding will use this value, instead /// of the one automatically tracked during pre-tokenization. /// /// Returns: /// An Encoding #[pyo3(signature = (type_id = 0, word_idx = None))] #[pyo3(text_signature = "(self, type_id=0, word_idx=None)")] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { to_encoding(&self.pretok, type_id, word_idx) } /// Get the splits currently managed by the PreTokenizedString /// /// Args: /// offset_referential: :obj:`str` /// Whether the returned splits should have offsets expressed relative /// to the original string, or the normalized one. choices: "original", "normalized". 
/// /// offset_type: :obj:`str` /// Whether the returned splits should have offsets expressed in bytes or chars. /// When slicing an str, we usually want to use chars, which is the default value. /// Now in some cases it might be interesting to get these offsets expressed in bytes, /// so it is possible to change this here. /// choices: "char", "bytes" /// /// Returns /// A list of splits #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] #[pyo3(text_signature = "(self, offset_referential=\"original\", offset_type=\"char\")")] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> Vec<PySplit> { get_splits(&self.pretok, offset_referential, offset_type) } } #[pyclass(module = "tokenizers", name = "PreTokenizedString")] #[derive(Clone)] pub struct PyPreTokenizedStringRefMut { inner: RefMutContainer<PreTokenizedString>, } impl DestroyPtr for PyPreTokenizedStringRefMut { fn destroy(&mut self) { self.inner.destroy(); } } impl PyPreTokenizedStringRefMut { pub fn new(pretok: &mut tk::PreTokenizedString) -> RefMutGuard<Self> { // SAFETY: This is safe because we return a RefMutGuard here. // The compiler will make sure the &mut stays valid as necessary. RefMutGuard::new(Self { inner: RefMutContainer::new(pretok), }) } pub fn destroyed_error() -> PyErr { exceptions::PyException::new_err( "Cannot use a PreTokenizedStringRefMut outside `pre_tokenize`", ) } } #[pymethods] impl PyPreTokenizedStringRefMut { fn split(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { self.inner .map_mut(|pretok| split(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn normalize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { self.inner .map_mut(|pretok| normalize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } fn tokenize(&mut self, func: &Bound<'_, PyAny>) -> PyResult<()> { self.inner .map_mut(|pretok| tokenize(pretok, func)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = (type_id = 0, word_idx = None))] fn to_encoding(&self, type_id: u32, word_idx: Option<u32>) -> PyResult<PyEncoding> { self.inner .map(|pretok| to_encoding(pretok, type_id, word_idx)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error)? } #[pyo3(signature = ( offset_referential = PyOffsetReferential(OffsetReferential::Original), offset_type = PyOffsetType(OffsetType::Char) ))] fn get_splits( &self, offset_referential: PyOffsetReferential, offset_type: PyOffsetType, ) -> PyResult<Vec<PySplit>> { self.inner .map(|pretok| get_splits(pretok, offset_referential, offset_type)) .ok_or_else(PyPreTokenizedStringRefMut::destroyed_error) } }
tokenizers/bindings/python/src/utils/pretokenization.rs/0
{ "file_path": "tokenizers/bindings/python/src/utils/pretokenization.rs", "repo_id": "tokenizers", "token_count": 4930 }
430
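As a rough companion to the bindings file above: driving the `split` hook from Python usually goes through `tokenizers.pre_tokenizers.PreTokenizer.custom`, with a class exposing `pre_tokenize(self, pretok)`. The sketch below only illustrates that flow; the underscore delimiter, the empty `BPE()` placeholder model, and the slicing-based split are assumptions made for the example, not code taken from the repository.

```python
from tokenizers import NormalizedString, PreTokenizedString, Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import PreTokenizer


class UnderscorePreTokenizer:
    """Splits every managed split on '_' while keeping offsets tracked via slicing."""

    def _split(self, i: int, normalized: NormalizedString):
        text = str(normalized)
        pieces, start = [], 0
        for idx, ch in enumerate(text):
            if ch == "_":
                if start < idx:
                    pieces.append(normalized[start:idx])  # slices keep offset tracking
                start = idx + 1
        if start < len(text):
            pieces.append(normalized[start:len(text)])
        return pieces

    def pre_tokenize(self, pretok: PreTokenizedString):
        # `split` only touches splits that have no associated tokens yet, as documented above.
        pretok.split(self._split)


tokenizer = Tokenizer(BPE())  # placeholder model for the sketch
tokenizer.pre_tokenizer = PreTokenizer.custom(UnderscorePreTokenizer())
```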
from tokenizers import Tokenizer from ..utils import data_dir, doc_wiki_tokenizer disable_printing = True original_print = print def print(*args, **kwargs): if not disable_printing: original_print(*args, **kwargs) class TestQuicktour: # This method contains everything we don't want to run @staticmethod def slow_train(): tokenizer, trainer = TestQuicktour.get_tokenizer_trainer() # START train files = [f"data/wikitext-103-raw/wiki.{split}.raw" for split in ["test", "train", "valid"]] tokenizer.train(files, trainer) # END train # START save tokenizer.save("data/tokenizer-wiki.json") # END save @staticmethod def get_tokenizer_trainer(): # START init_tokenizer from tokenizers import Tokenizer from tokenizers.models import BPE tokenizer = Tokenizer(BPE(unk_token="[UNK]")) # END init_tokenizer # START init_trainer from tokenizers.trainers import BpeTrainer trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) # END init_trainer # START init_pretok from tokenizers.pre_tokenizers import Whitespace tokenizer.pre_tokenizer = Whitespace() # END init_pretok return tokenizer, trainer def test_quicktour(self, doc_wiki_tokenizer): def print(*args, **kwargs): pass try: # START reload_tokenizer tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json") # END reload_tokenizer except Exception: tokenizer = Tokenizer.from_file(doc_wiki_tokenizer) # START encode output = tokenizer.encode("Hello, y'all! How are you 😁 ?") # END encode # START print_tokens print(output.tokens) # ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"] # END print_tokens assert output.tokens == [ "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", ] # START print_ids print(output.ids) # [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] # END print_ids assert output.ids == [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35] # START print_offsets print(output.offsets[9]) # (26, 27) # END print_offsets assert output.offsets[9] == (26, 27) # START use_offsets sentence = "Hello, y'all! How are you 😁 ?" sentence[26:27] # "😁" # END use_offsets assert sentence[26:27] == "😁" # START check_sep tokenizer.token_to_id("[SEP]") # 2 # END check_sep assert tokenizer.token_to_id("[SEP]") == 2 # START init_template_processing from tokenizers.processors import TemplateProcessing tokenizer.post_processor = TemplateProcessing( single="[CLS] $A [SEP]", pair="[CLS] $A [SEP] $B:1 [SEP]:1", special_tokens=[ ("[CLS]", tokenizer.token_to_id("[CLS]")), ("[SEP]", tokenizer.token_to_id("[SEP]")), ], ) # END init_template_processing # START print_special_tokens output = tokenizer.encode("Hello, y'all! 
How are you 😁 ?") print(output.tokens) # ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"] # END print_special_tokens assert output.tokens == [ "[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]", ] # START print_special_tokens_pair output = tokenizer.encode("Hello, y'all!", "How are you 😁 ?") print(output.tokens) # ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"] # END print_special_tokens_pair assert output.tokens == [ "[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]", ] # START print_type_ids print(output.type_ids) # [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] # END print_type_ids assert output.type_ids == [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] # START encode_batch output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"]) # END encode_batch # START encode_batch_pair output = tokenizer.encode_batch( [["Hello, y'all!", "How are you 😁 ?"], ["Hello to you too!", "I'm fine, thank you!"]] ) # END encode_batch_pair # START enable_padding tokenizer.enable_padding(pad_id=3, pad_token="[PAD]") # END enable_padding # START print_batch_tokens output = tokenizer.encode_batch(["Hello, y'all!", "How are you 😁 ?"]) print(output[1].tokens) # ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] # END print_batch_tokens assert output[1].tokens == ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"] # START print_attention_mask print(output[1].attention_mask) # [1, 1, 1, 1, 1, 1, 1, 0] # END print_attention_mask assert output[1].attention_mask == [1, 1, 1, 1, 1, 1, 1, 0] if __name__ == "__main__": import os from urllib import request from zipfile import ZipFile disable_printing = False if not os.path.isdir("data/wikitext-103-raw"): print("Downloading wikitext-103...") wiki_text, _ = request.urlretrieve( "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip" ) with ZipFile(wiki_text, "r") as z: print("Unzipping in data...") z.extractall("data") print("Now training...") TestQuicktour.slow_train()
tokenizers/bindings/python/tests/documentation/test_quicktour.py/0
{ "file_path": "tokenizers/bindings/python/tests/documentation/test_quicktour.py", "repo_id": "tokenizers", "token_count": 3290 }
431
# Encoding

<tokenizerslangcontent>
<python>
## Encoding

[[autodoc]] tokenizers.Encoding
    - all
    - attention_mask
    - ids
    - n_sequences
    - offsets
    - overflowing
    - sequence_ids
    - special_tokens_mask
    - tokens
    - type_ids
    - word_ids
    - words
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/encoding.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/encoding.mdx", "repo_id": "tokenizers", "token_count": 190 }
432
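As a quick illustration of the attributes listed above (the checkpoint name and the exact token strings are assumptions for the sketch, not part of the reference):

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer.encode("Hello, y'all!", "How are you?")

print(encoding.tokens)               # the produced (sub)tokens, e.g. ['[CLS]', 'hello', ',', ...]
print(encoding.ids)                  # vocabulary ids, one per token
print(encoding.type_ids)             # 0 for the first sequence, 1 for the second
print(encoding.offsets)              # (start, end) character offsets into the input
print(encoding.attention_mask)       # 1 for real tokens, 0 for padding
print(encoding.special_tokens_mask)  # 1 for special tokens such as [CLS]/[SEP]
print(encoding.word_ids)             # source word index per token, None for special tokens
print(encoding.n_sequences)          # 2 here, since a pair was encoded
```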
from docutils import nodes import sphinx from sphinx.locale import _ from conf import rust_version logger = sphinx.util.logging.getLogger(__name__) class RustRef: def __call__(self, name, rawtext, text, lineno, inliner, options={}, content=[]): doctype = name.split("_")[1] parts = text.split("::") if text.startswith("~"): title = parts[-1] parts[0] = parts[0][1:] else: content = text link = self.base_link() if doctype == "struct": l, title = self.make_struct_link(parts, title) if doctype == "func": l, title = self.make_func_link(parts, title) if doctype == "meth": l, title = self.make_meth_link(parts, title) if doctype == "trait": l, title = self.make_trait_link(parts, title) link += l node = nodes.reference(internal=False, refuri=link, text=title) wrapper = nodes.literal(classes=["xref"]) wrapper += node return [wrapper], [] def base_link(self): return f"https://docs.rs/tokenizers/{rust_version}" def make_struct_link(self, parts, title): link = "" struct_name = parts[-1] path = parts[:-1] for p in path: link += f"/{p}" link += f"/struct.{struct_name}.html" return link, title def make_func_link(self, parts, title): link = "" fn_name = parts[-1] path = parts[:-1] for p in path: link += f"/{p}" link += f"/fn.{fn_name}.html" return link, title def make_meth_link(self, parts, title): meth_name = parts[-1] if meth_name.endswith("()"): meth_name = meth_name[:-2] link, title = self.make_struct_link(parts[:-1], title) link += f"#method.{meth_name}" if not title.endswith(")"): title += "()" return link, title def make_trait_link(self, parts, title): link = "" trait_name = parts[-1] path = parts[:-1] for p in path: link += f"/{p}" link += f"/trait.{trait_name}.html" return link, title def setup(app): app.add_role("rust_struct", RustRef()) app.add_role("rust_func", RustRef()) app.add_role("rust_meth", RustRef()) app.add_role("rust_trait", RustRef()) return { "version": "0.1", "parallel_read_safe": True, "parallel_write_safe": True, }
tokenizers/docs/source/_ext/rust_doc.py/0
{ "file_path": "tokenizers/docs/source/_ext/rust_doc.py", "repo_id": "tokenizers", "token_count": 1221 }
433
Tokenizers
====================================================================================================

Fast State-of-the-art tokenizers, optimized for both research and production

`🤗 Tokenizers`_ provides an implementation of today's most used tokenizers, with a focus on
performance and versatility. These tokenizers are also used in `🤗 Transformers`_.

.. _🤗 Tokenizers: https://github.com/huggingface/tokenizers
.. _🤗 Transformers: https://github.com/huggingface/transformers

Main features:
----------------------------------------------------------------------------------------------------

 - Train new vocabularies and tokenize, using today's most used tokenizers.
 - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes
   less than 20 seconds to tokenize a GB of text on a server's CPU.
 - Easy to use, but also extremely versatile.
 - Designed for both research and production.
 - Full alignment tracking. Even with destructive normalization, it's always possible to get
   the part of the original sentence that corresponds to any token.
 - Does all the pre-processing: Truncation, Padding, add the special tokens your model needs.

.. toctree::
    :maxdepth: 2
    :caption: Getting Started

    quicktour
    installation/main
    pipeline
    components

.. toctree-tags::
    :maxdepth: 3
    :caption: Using 🤗 Tokenizers
    :glob:

    :python:tutorials/python/*

.. toctree::
    :maxdepth: 3
    :caption: API Reference

    api/reference

.. include:: entities.inc
tokenizers/docs/source/index.rst/0
{ "file_path": "tokenizers/docs/source/index.rst", "repo_id": "tokenizers", "token_count": 404 }
434
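The "full alignment tracking" bullet above is easiest to see with offsets in hand; a small sketch (the checkpoint name is an assumption for the example):

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")

text = "Héllo WORLD"
encoding = tokenizer.encode(text)

for token, (start, end) in zip(encoding.tokens, encoding.offsets):
    # offsets always point back into the original string, even though the
    # normalizer lowercased the text and stripped the accent
    print(f"{token!r:12} -> {text[start:end]!r}")
```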
use std::time::{Duration, Instant}; use criterion::black_box; use tokenizers::{ Decoder, EncodeInput, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerImpl, Trainer, }; pub fn iter_bench_encode<M, N, PT, PP, D>( iters: u64, tokenizer: &TokenizerImpl<M, N, PT, PP, D>, lines: &[EncodeInput], ) -> Duration where M: Model, N: Normalizer, PT: PreTokenizer, PP: PostProcessor, D: Decoder, { let mut duration = Duration::new(0, 0); let mut line_index: usize = 0; for _i in 0..iters { if line_index >= lines.len() { line_index = 0; } let input = lines[line_index].clone(); let start = Instant::now(); let _ = black_box(tokenizer.encode(input, false)); duration = duration.checked_add(start.elapsed()).unwrap(); } duration } pub fn iter_bench_encode_batch<M, N, PT, PP, D>( iters: u64, tokenizer: &TokenizerImpl<M, N, PT, PP, D>, batches: &[Vec<EncodeInput>], ) -> Duration where M: Model + Send + Sync, N: Normalizer + Send + Sync, PT: PreTokenizer + Send + Sync, PP: PostProcessor + Send + Sync, D: Decoder + Send + Sync, { let mut duration = Duration::new(0, 0); let mut batch_index: usize = 0; for _i in 0..iters { if batch_index >= batches.len() { batch_index = 0; } let batch = batches[batch_index].clone(); let start = Instant::now(); let _ = black_box(tokenizer.encode_batch(batch, false)); duration = duration.checked_add(start.elapsed()).unwrap(); } duration } pub fn iter_bench_train<T, M, N, PT, PP, D>( iters: u64, tokenizer: &mut TokenizerImpl<M, N, PT, PP, D>, trainer: &mut T, files: Vec<String>, ) -> Duration where T: Trainer<Model = M> + Sync, M: Model + Send + Sync, N: Normalizer + Send + Sync, PT: PreTokenizer + Send + Sync, PP: PostProcessor + Send + Sync, D: Decoder + Send + Sync, { let mut duration = Duration::new(0, 0); for _i in 0..iters { let start = Instant::now(); tokenizer.train_from_files(trainer, files.clone()).unwrap(); duration = duration.checked_add(start.elapsed()).unwrap(); } duration }
tokenizers/tokenizers/benches/common/mod.rs/0
{ "file_path": "tokenizers/tokenizers/benches/common/mod.rs", "repo_id": "tokenizers", "token_count": 964 }
435
// A dependency graph that contains any wasm must all be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js")
  .catch(e => console.error("Error importing `index.js`:", e));
tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js", "repo_id": "tokenizers", "token_count": 79 }
436
//! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model. use std::{iter, mem}; mod model; mod serialization; pub mod trainer; mod word; type Pair = (u32, u32); /// Errors that can be encountered while using or constructing a `BPE` model. #[derive(thiserror::Error, Debug)] pub enum Error { /// An error encountered while reading files mainly. #[error("IoError: {0}")] Io(#[from] std::io::Error), /// An error forwarded from Serde, while parsing JSON #[error("JsonError: {0}")] JsonError(#[from] serde_json::Error), /// When the vocab.json file is in the wrong format #[error("Bad vocabulary json file")] BadVocabulary, /// When the merges.txt file is in the wrong format. This error holds the line /// number of the line that caused the error. #[error("Merges text file invalid at line {0}")] BadMerges(usize), /// If a token found in merges, is not in the vocab #[error("Token `{0}` out of vocabulary")] MergeTokenOutOfVocabulary(String), /// If the provided unk token is out of vocabulary #[error("Unk token `{0}` not found in the vocabulary")] UnkTokenOutOfVocabulary(String), /// Dropout not between 0 and 1. #[error("Dropout should be between 0 and 1")] InvalidDropout, } /// Provides access to the `FirstLastIterator` to any Iterator pub(crate) trait WithFirstLastIterator: Iterator + Sized { fn with_first_and_last(self) -> FirstLastIterator<Self>; } impl<I> WithFirstLastIterator for I where I: Iterator, { fn with_first_and_last(self) -> FirstLastIterator<Self> { FirstLastIterator { first: true, iter: self.peekable(), } } } /// Provides information about whether an item is the first and/or the last of the iterator pub(crate) struct FirstLastIterator<I> where I: Iterator, { first: bool, iter: iter::Peekable<I>, } impl<I> Iterator for FirstLastIterator<I> where I: Iterator, { /// (is_first, is_last, item) type Item = (bool, bool, I::Item); fn next(&mut self) -> Option<Self::Item> { let first = mem::replace(&mut self.first, false); self.iter .next() .map(|e| (first, self.iter.peek().is_none(), e)) } } // Re-export pub use model::*; pub use trainer::*; use word::*;
tokenizers/tokenizers/src/models/bpe/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/bpe/mod.rs", "repo_id": "tokenizers", "token_count": 891 }
437
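The `WithFirstLastIterator` helper above tags every item with whether it is the first and/or last element by peeking one item ahead; a rough Python equivalent of the same idea (names are mine, not from the crate):

```python
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")


def with_first_and_last(items: Iterable[T]) -> Iterator[Tuple[bool, bool, T]]:
    it = iter(items)
    try:
        current = next(it)
    except StopIteration:
        return  # empty input yields nothing, like the Rust iterator
    first = True
    for nxt in it:
        yield (first, False, current)
        first, current = False, nxt
    yield (first, True, current)


assert list(with_first_and_last("abc")) == [
    (True, False, "a"),
    (False, False, "b"),
    (False, True, "c"),
]
```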
use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder}; use serde::{ de::{MapAccess, Visitor}, ser::SerializeStruct, Deserialize, Deserializer, Serialize, Serializer, }; use std::collections::HashSet; impl Serialize for WordPiece { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { let mut model = serializer.serialize_struct("WordPiece", 5)?; // Small fields first model.serialize_field("type", "WordPiece")?; model.serialize_field("unk_token", &self.unk_token)?; model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?; model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?; // Then large ones let ordered_vocab = OrderedVocabIter::new(&self.vocab_r); model.serialize_field("vocab", &ordered_vocab)?; model.end() } } impl<'de> Deserialize<'de> for WordPiece { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_struct( "WordPiece", &[ "type", "unk_token", "continuing_subword_prefix", "max_input_chars_per_word", "vocab", ], WordPieceVisitor, ) } } struct WordPieceVisitor; impl<'de> Visitor<'de> for WordPieceVisitor { type Value = WordPiece; fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { write!(fmt, "struct WordPiece") } fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error> where V: MapAccess<'de>, { let mut builder = WordPieceBuilder::new(); let mut missing_fields = vec![ // for retrocompatibility the "type" field is not mandatory "unk_token", "continuing_subword_prefix", "max_input_chars_per_word", "vocab", ] .into_iter() .collect::<HashSet<_>>(); while let Some(key) = map.next_key::<String>()? { match key.as_ref() { "unk_token" => builder = builder.unk_token(map.next_value()?), "continuing_subword_prefix" => { builder = builder.continuing_subword_prefix(map.next_value()?) } "max_input_chars_per_word" => { builder = builder.max_input_chars_per_word(map.next_value()?) } "vocab" => builder = builder.vocab(map.next_value()?), "type" => match map.next_value()? { "WordPiece" => {} u => { return Err(serde::de::Error::invalid_value( serde::de::Unexpected::Str(u), &"WordPiece", )) } }, _ => {} } missing_fields.remove::<str>(&key); } if !missing_fields.is_empty() { Err(serde::de::Error::missing_field( missing_fields.iter().next().unwrap(), )) } else { Ok(builder.build().map_err(serde::de::Error::custom)?) } } } #[cfg(test)] mod tests { use super::*; #[test] fn serde() { let wp = WordPiece::default(); let wp_s = "{\ \"type\":\"WordPiece\",\ \"unk_token\":\"[UNK]\",\ \"continuing_subword_prefix\":\"##\",\ \"max_input_chars_per_word\":100,\ \"vocab\":{}\ }"; assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s); assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp); } #[test] fn deserialization_should_fail() { let missing_unk = "{\ \"type\":\"WordPiece\",\ \"continuing_subword_prefix\":\"##\",\ \"max_input_chars_per_word\":100,\ \"vocab\":{}\ }"; assert!(serde_json::from_str::<WordPiece>(missing_unk) .unwrap_err() .to_string() .starts_with("missing field `unk_token`")); let wrong_type = "{\ \"type\":\"WordLevel\",\ \"unk_token\":\"[UNK]\",\ \"vocab\":{}\ }"; assert!(serde_json::from_str::<WordPiece>(wrong_type) .unwrap_err() .to_string() .starts_with("invalid value: string \"WordLevel\", expected WordPiece")); } }
tokenizers/tokenizers/src/models/wordpiece/serialization.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/wordpiece/serialization.rs", "repo_id": "tokenizers", "token_count": 2453 }
438
use serde::{Deserialize, Serialize}; use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior}; use crate::utils::macro_rules_attribute; use unicode_categories::UnicodeCategories; fn is_punc(x: char) -> bool { char::is_ascii_punctuation(&x) || x.is_punctuation() } #[derive(Copy, Clone, Debug, PartialEq, Eq)] #[macro_rules_attribute(impl_serde_type!)] pub struct Punctuation { #[serde(default = "default_split")] behavior: SplitDelimiterBehavior, } fn default_split() -> SplitDelimiterBehavior { SplitDelimiterBehavior::Isolated } impl Punctuation { pub fn new(behavior: SplitDelimiterBehavior) -> Self { Self { behavior } } } impl Default for Punctuation { fn default() -> Self { Self::new(SplitDelimiterBehavior::Isolated) } } impl PreTokenizer for Punctuation { fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> { pretokenized.split(|_, s| s.split(is_punc, self.behavior)) } } #[cfg(test)] mod tests { use super::*; use crate::{OffsetReferential, OffsetType}; #[test] fn punctuation_basic() { let pretok = Punctuation::default(); let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into(); pretok.pre_tokenize(&mut pretokenized).unwrap(); assert_eq!( pretokenized .get_splits(OffsetReferential::Original, OffsetType::Byte) .into_iter() .map(|(s, o, _)| (s, o)) .collect::<Vec<_>>(), vec![ ("Hey friend", (0, 10)), ("!", (10, 11)), (" How are you", (11, 27)), ("?", (27, 28)), ("!", (28, 29)), ("?", (29, 30)), ] ); } #[test] fn deserialization() { let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap(); assert_eq!(punctuation, Punctuation::default()); assert_eq!( punctuation, Punctuation::new(SplitDelimiterBehavior::Isolated) ); } #[test] #[should_panic] fn deserialization_erroneous() { let _punctuation: Punctuation = serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap(); } }
tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs", "repo_id": "tokenizers", "token_count": 1102 }
439
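The same component is exposed through the Python bindings; a short sketch of the behaviour the tests above check (the output shown in the comment is indicative, not copied from a run):

```python
from tokenizers.pre_tokenizers import Punctuation

pre_tokenizer = Punctuation(behavior="isolated")
print(pre_tokenizer.pre_tokenize_str("Hey friend! How are you?!?"))
# A list of (piece, (start, end)) pairs, with every punctuation mark isolated
# as its own split, e.g. ('Hey friend', (0, 10)), ('!', (10, 11)), ...
```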
use crate::utils::SysRegex; use crate::{Offsets, Result}; use regex::Regex; /// Pattern used to split a NormalizedString pub trait Pattern { /// Slice the given string in a list of pattern match positions, with /// a boolean indicating whether this is a match or not. /// /// This method *must* cover the whole string in its outputs, with /// contiguous ordered slices. fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>>; } impl Pattern for char { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { let is_char = |c: char| -> bool { c == *self }; is_char.find_matches(inside) } } impl Pattern for &str { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if self.is_empty() { // If we try to find the matches with an empty string, just don't match anything return Ok(vec![((0, inside.chars().count()), false)]); } let re = Regex::new(&regex::escape(self))?; (&re).find_matches(inside) } } impl Pattern for &String { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { let s: &str = self; s.find_matches(inside) } } impl Pattern for &Regex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for m in self.find_iter(inside) { if prev != m.start() { splits.push(((prev, m.start()), false)); } splits.push(((m.start(), m.end()), true)); prev = m.end(); } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } } impl Pattern for &SysRegex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for (start, end) in self.find_iter(inside) { if prev != start { splits.push(((prev, start), false)); } splits.push(((start, end), true)); prev = end; } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } } impl<F> Pattern for F where F: Fn(char) -> bool, { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut last_offset = 0; let mut last_seen = 0; let mut matches = inside .char_indices() .flat_map(|(b, c)| { last_seen = b + c.len_utf8(); if self(c) { let mut events = Vec::with_capacity(2); if last_offset < b { // We need to emit what was before this match events.push(((last_offset, b), false)); } events.push(((b, b + c.len_utf8()), true)); last_offset = b + c.len_utf8(); events } else { vec![] } }) .collect::<Vec<_>>(); // Do not forget the last potential split if last_seen > last_offset { matches.push(((last_offset, last_seen), false)); } Ok(matches) } } /// Invert the `is_match` flags for the wrapped Pattern. This is usefull /// for example when we use a regex that matches words instead of a delimiter, /// and we want to match the delimiter. pub struct Invert<P: Pattern>(pub P); impl<P: Pattern> Pattern for Invert<P> { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { Ok(self .0 .find_matches(inside)? .into_iter() .map(|(offsets, flag)| (offsets, !flag)) .collect()) } } #[cfg(test)] mod tests { use super::*; use regex::Regex; macro_rules! 
do_test { ($inside: expr, $pattern: expr => @ERROR) => { assert!($pattern.find_matches($inside).is_err()); }; ($inside: expr, $pattern: expr => $result: expr) => { assert_eq!($pattern.find_matches($inside).unwrap(), $result); assert_eq!( Invert($pattern).find_matches($inside).unwrap(), $result .into_iter() .map(|v: (Offsets, bool)| (v.0, !v.1)) .collect::<Vec<_>>() ); }; } #[test] fn char() { do_test!("aba", 'a' => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]); do_test!("bbbba", 'a' => vec![((0, 4), false), ((4, 5), true)]); do_test!("aabbb", 'a' => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("", 'a' => vec![((0, 0), false)]); do_test!("aaa", 'b' => vec![((0, 3), false)]); } #[test] fn str() { do_test!("aba", "a" => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]); do_test!("bbbba", "a" => vec![((0, 4), false), ((4, 5), true)]); do_test!("aabbb", "a" => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("aabbb", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 5), false)]); do_test!("aabbab", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 4), false), ((4, 6), true)] ); do_test!("", "" => vec![((0, 0), false)]); do_test!("aaa", "" => vec![((0, 3), false)]); do_test!("aaa", "b" => vec![((0, 3), false)]); } #[test] fn functions() { let is_b = |c| c == 'b'; do_test!("aba", is_b => vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]); do_test!("aaaab", is_b => vec![((0, 4), false), ((4, 5), true)]); do_test!("bbaaa", is_b => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]); do_test!("", is_b => vec![((0, 0), false)]); do_test!("aaa", is_b => vec![((0, 3), false)]); } #[test] fn regex() { let is_whitespace = Regex::new(r"\s+").unwrap(); do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]); do_test!(" a b ", &is_whitespace => vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)] ); do_test!("", &is_whitespace => vec![((0, 0), false)]); do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace => vec![((0, 16), false), ((16, 17), true), ((17, 45), false)] ); do_test!("aaa", &is_whitespace => vec![((0, 3), false)]); } #[test] fn sys_regex() { let is_whitespace = SysRegex::new(r"\s+").unwrap(); do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]); do_test!(" a b ", &is_whitespace => vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)] ); do_test!("", &is_whitespace => vec![((0, 0), false)]); do_test!("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘", &is_whitespace => vec![((0, 16), false), ((16, 17), true), ((17, 45), false)] ); do_test!("aaa", &is_whitespace => vec![((0, 3), false)]); } }
tokenizers/tokenizers/src/tokenizer/pattern.rs/0
{ "file_path": "tokenizers/tokenizers/src/tokenizer/pattern.rs", "repo_id": "tokenizers", "token_count": 3903 }
440
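The `Pattern::find_matches` contract above returns contiguous spans covering the whole input, each tagged as match or non-match. A rough Python model of that contract using the standard `re` module (character offsets here, whereas the Rust version works on byte offsets):

```python
import re
from typing import List, Tuple

Offsets = Tuple[int, int]


def find_matches(pattern: str, inside: str) -> List[Tuple[Offsets, bool]]:
    if not inside:
        return [((0, 0), False)]
    spans, prev = [], 0
    for m in re.finditer(pattern, inside):
        if prev != m.start():
            spans.append(((prev, m.start()), False))  # gap before the match
        spans.append(((m.start(), m.end()), True))    # the match itself
        prev = m.end()
    if prev != len(inside):
        spans.append(((prev, len(inside)), False))    # trailing remainder
    return spans


assert find_matches(r"\s+", "a   b") == [((0, 1), False), ((1, 4), True), ((4, 5), False)]
```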
#![cfg(feature = "http")]
use tokenizers::{FromPretrainedParameters, Result, Tokenizer};

#[test]
fn test_from_pretrained() -> Result<()> {
    let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?;
    let encoding = tokenizer.encode("Hey there dear friend!", false)?;
    assert_eq!(
        encoding.get_tokens(),
        &["Hey", "there", "dear", "friend", "!"]
    );
    Ok(())
}

#[test]
fn test_from_pretrained_revision() -> Result<()> {
    let tokenizer = Tokenizer::from_pretrained("anthony/tokenizers-test", None)?;
    let encoding = tokenizer.encode("Hey there dear friend!", false)?;
    assert_eq!(
        encoding.get_tokens(),
        &["hey", "there", "dear", "friend", "!"]
    );

    let tokenizer = Tokenizer::from_pretrained(
        "anthony/tokenizers-test",
        Some(FromPretrainedParameters {
            revision: "gpt-2".to_string(),
            ..Default::default()
        }),
    )?;
    let encoding = tokenizer.encode("Hey there dear friend!", false)?;
    assert_eq!(
        encoding.get_tokens(),
        &["Hey", "Ġthere", "Ġdear", "Ġfriend", "!"]
    );

    Ok(())
}

#[test]
fn test_from_pretrained_invalid_model() {
    let tokenizer = Tokenizer::from_pretrained("docs?", None);
    assert!(tokenizer.is_err());
}

#[test]
fn test_from_pretrained_invalid_revision() {
    let tokenizer = Tokenizer::from_pretrained(
        "bert-base-cased",
        Some(FromPretrainedParameters {
            revision: "gpt?".to_string(),
            ..Default::default()
        }),
    );
    assert!(tokenizer.is_err());
}
tokenizers/tokenizers/tests/from_pretrained.rs/0
{ "file_path": "tokenizers/tokenizers/tests/from_pretrained.rs", "repo_id": "tokenizers", "token_count": 683 }
441
FROM python:3.10-slim
ENV PYTHONDONTWRITEBYTECODE=1
USER root

RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git
ENV VIRTUAL_ENV=/usr/local
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --no-cache-dir librosa "transformers[sklearn,sentencepiece,vision,testing]"
RUN pip uninstall -y transformers
transformers/docker/pipeline-torch.dockerfile/0
{ "file_path": "transformers/docker/pipeline-torch.dockerfile", "repo_id": "transformers", "token_count": 240 }
442
local base = import 'templates/base.libsonnet';
local tpus = import 'templates/tpus.libsonnet';
local utils = import "templates/utils.libsonnet";
local volumes = import "templates/volumes.libsonnet";

local bertBaseCased = base.BaseTest {
  frameworkPrefix: "hf",
  modelName: "bert-base-cased",
  mode: "example",
  configMaps: [],

  timeout: 3600, # 1 hour, in seconds

  image: std.extVar('image'),
  imageTag: std.extVar('image-tag'),

  tpuSettings+: {
    softwareVersion: "pytorch-nightly",
  },
  accelerator: tpus.v3_8,

  volumeMap+: {
    datasets: volumes.PersistentVolumeSpec {
      name: "huggingface-cluster-disk",
      mountPath: "/datasets",
    },
  },
  command: utils.scriptCommand(
    |||
      python -m pytest -s transformers/examples/pytorch/test_xla_examples.py -v
      test_exit_code=$?
      echo "\nFinished running commands.\n"
      test $test_exit_code -eq 0
    |||
  ),
};

bertBaseCased.oneshotJob
transformers/docker/transformers-pytorch-tpu/bert-base-cased.jsonnet/0
{ "file_path": "transformers/docker/transformers-pytorch-tpu/bert-base-cased.jsonnet", "repo_id": "transformers", "token_count": 371 }
443
<!--- Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Installation

Install 🤗 Transformers for whichever deep learning library you are working with, set up your cache, and optionally configure 🤗 Transformers to run offline.

🤗 Transformers is tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:

* [PyTorch](https://pytorch.org/get-started/locally/) installation instructions.
* [TensorFlow 2.0](https://www.tensorflow.org/install/pip) installation instructions.
* [Flax](https://flax.readthedocs.io/en/latest/) installation instructions.

## Install with pip

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you are unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.

Start by creating a virtual environment in your project directory:

```bash
python -m venv .env
```

Activate the virtual environment. On Linux and macOS:

```bash
source .env/bin/activate
```

Activate the virtual environment on Windows:

```bash
.env/Scripts/activate
```

Now you can install 🤗 Transformers with the following command:

```bash
pip install transformers
```

For CPU-only support, you can conveniently install 🤗 Transformers and a deep learning library in one line. For example, install 🤗 Transformers and PyTorch with:

```bash
pip install transformers[torch]
```

🤗 Transformers and TensorFlow 2.0:

```bash
pip install transformers[tf-cpu]
```

🤗 Transformers and Flax:

```bash
pip install transformers[flax]
```

Finally, check whether 🤗 Transformers was installed properly by running the following command. It will download a pretrained model:

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))"
```

The label and score are then printed:

```bash
[{'label': 'POSITIVE', 'score': 0.9998704791069031}]
```

## Install from source

Install 🤗 Transformers from source with the following command:

```bash
pip install git+https://github.com/huggingface/transformers
```

This command installs the current `main` version rather than the latest `stable` version. The `main` version is useful for staying up to date with the latest developments, for example when a bug has been fixed since the last official release but a new release has not been rolled out yet. However, this also means the `main` version is not always stable. We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!

Check whether 🤗 Transformers was installed correctly by running the following command:

```bash
python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))"
```

## Editable install

You will need an editable install if you want to:

* Use the `main` version of the source code.
* Contribute to 🤗 Transformers and test changes to the code.

Clone the repository and install 🤗 Transformers with the following commands:

```bash
git clone https://github.com/huggingface/transformers.git
cd transformers
pip install -e .
```

These commands link the folder you cloned the repository to with your Python library paths. Python will now search the folder you cloned to, in addition to the normal library paths. For example, if your Python packages are normally installed in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python will also search the folder you cloned to: `~/transformers/`.

<Tip warning={true}>

You must keep the `transformers` folder if you want to keep using the library.

</Tip>

Now you can easily update your clone to the latest version of 🤗 Transformers with the following command:

```bash
cd ~/transformers/
git pull
```

Your Python environment will find the `main` version of 🤗 Transformers on the next run.

## Install with conda

Install from the conda channel `conda-forge`:

```bash
conda install conda-forge::transformers
```

## Cache setup

Pretrained models are downloaded and locally cached at `~/.cache/huggingface/hub`. This is the default directory given by the shell environment variable `TRANSFORMERS_CACHE`. On Windows, the default directory is `C:\Users\username\.cache\huggingface\hub`. You can change the shell environment variables shown below - in order of priority - to specify a different cache directory:

1. Shell environment variable (default): `HUGGINGFACE_HUB_CACHE` or `TRANSFORMERS_CACHE`.
2. Shell environment variable: `HF_HOME`.
3. Shell environment variable: `XDG_CACHE_HOME` + `/huggingface`.

<Tip>

🤗 Transformers will use the shell environment variables `PYTORCH_TRANSFORMERS_CACHE` or `PYTORCH_PRETRAINED_BERT_CACHE` if you are coming from an earlier iteration of this library and have set those environment variables, unless you specify the shell environment variable `TRANSFORMERS_CACHE`.

</Tip>

## Offline mode

🤗 Transformers is able to run in a firewalled or offline environment by only using local files. Set the environment variable `TRANSFORMERS_OFFLINE=1` to enable this behavior.

<Tip>

Add [🤗 Datasets](https://huggingface.co/docs/datasets/) to your offline training workflow by setting the environment variable `HF_DATASETS_OFFLINE=1`.

</Tip>

For example, you would normally run a program on a network firewalled to external instances with the following command:

```bash
python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ...
```

Run the same program in an offline instance with:

```bash
HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ...
```

The script should now run without hanging or waiting for a timeout, because it knows it should only look for local files.

### Fetch models and tokenizers to use offline

Another option for using 🤗 Transformers offline is to download the files ahead of time, and then point to their local path when you need to use them offline. There are three ways to do this:

* Download a file through the user interface of the [Model Hub](https://huggingface.co/models) by clicking on the ↓ icon.

    ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png)

* Use the [`PreTrainedModel.from_pretrained`] and [`PreTrainedModel.save_pretrained`] workflow:

    1. Download your files ahead of time with [`PreTrainedModel.from_pretrained`]:

    ```py
    >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

    >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B")
    >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B")
    ```

    2. Save your files to a specified directory with [`PreTrainedModel.save_pretrained`]:

    ```py
    >>> tokenizer.save_pretrained("./your/path/bigscience_t0")
    >>> model.save_pretrained("./your/path/bigscience_t0")
    ```

    3. Now when you are offline, reload your files with [`PreTrainedModel.from_pretrained`] from the specified directory:

    ```py
    >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0")
    >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0")
    ```

* Programmatically download files with the [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) library:

    1. Install the `huggingface_hub` library in your virtual environment:

    ```bash
    python -m pip install huggingface_hub
    ```

    2. Use the [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub) function to download a file to a specific path. For example, the following command downloads the `config.json` file from the [T0](https://huggingface.co/bigscience/T0_3B) model to your desired path:

    ```py
    >>> from huggingface_hub import hf_hub_download

    >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0")
    ```

Once your file is downloaded and locally cached, specify its local path to load and use it:

```py
>>> from transformers import AutoConfig

>>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json")
```

<Tip>

See the [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream) section for more details on downloading files stored on the Hub.

</Tip>
transformers/docs/source/de/installation.md/0
{ "file_path": "transformers/docs/source/de/installation.md", "repo_id": "transformers", "token_count": 3999 }
444
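As a small Python-side sketch of the offline workflow described in the installation document above (the environment variables must be set before `transformers` is imported; `local_files_only=True` is the standard `from_pretrained` flag that restricts loading to the local cache):

```python
import os

os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_DATASETS_OFFLINE"] = "1"

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# These calls only succeed if the files were downloaded (or saved with
# `save_pretrained`) beforehand, as shown in the steps above.
tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B", local_files_only=True)
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B", local_files_only=True)
```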
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # How to add a model to 🤗 Transformers? The 🤗 Transformers library is often able to offer new models thanks to community contributors. But this can be a challenging project and requires an in-depth knowledge of the 🤗 Transformers library and the model to implement. At Hugging Face, we're trying to empower more of the community to actively add models and we've put together this guide to walk you through the process of adding a PyTorch model (make sure you have [PyTorch installed](https://pytorch.org/get-started/locally/)). Along the way, you'll: - get insights into open-source best practices - understand the design principles behind one of the most popular deep learning libraries - learn how to efficiently test large models - learn how to integrate Python utilities like `black`, `ruff`, and `make fix-copies` to ensure clean and readable code A Hugging Face team member will be available to help you along the way so you'll never be alone. 🤗 ❤️ To get started, open a [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) issue for the model you want to see in 🤗 Transformers. If you're not especially picky about contributing a specific model, you can filter by the [New model label](https://github.com/huggingface/transformers/labels/New%20model) to see if there are any unclaimed model requests and work on it. Once you've opened a new model request, the first step is to get familiar with 🤗 Transformers if you aren't already! ## General overview of 🤗 Transformers First, you should get a general overview of 🤗 Transformers. 🤗 Transformers is a very opinionated library, so there is a chance that you don't agree with some of the library's philosophies or design choices. From our experience, however, we found that the fundamental design choices and philosophies of the library are crucial to efficiently scale 🤗 Transformers while keeping maintenance costs at a reasonable level. A good first starting point to better understand the library is to read the [documentation of our philosophy](philosophy). As a result of our way of working, there are some choices that we try to apply to all models: - Composition is generally favored over-abstraction - Duplicating code is not always bad if it strongly improves the readability or accessibility of a model - Model files are as self-contained as possible so that when you read the code of a specific model, you ideally only have to look into the respective `modeling_....py` file. In our opinion, the library's code is not just a means to provide a product, *e.g.* the ability to use BERT for inference, but also as the very product that we want to improve. Hence, when adding a model, the user is not only the person who will use your model, but also everybody who will read, try to understand, and possibly tweak your code. 
With this in mind, let's go a bit deeper into the general library design. ### Overview of models To successfully add a model, it is important to understand the interaction between your model and its config, [`PreTrainedModel`], and [`PretrainedConfig`]. For exemplary purposes, we will call the model to be added to 🤗 Transformers `BrandNewBert`. Let's take a look: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/> As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits from `BrandNewBertPreTrainedModel` which in turn inherits from [`PreTrainedModel`] and that's it. As a general rule, we want to make sure that a new model only depends on [`PreTrainedModel`]. The important functionalities that are automatically provided to every new model are [`~PreTrainedModel.from_pretrained`] and [`~PreTrainedModel.save_pretrained`], which are used for serialization and deserialization. All of the other important functionalities, such as `BrandNewBertModel.forward` should be completely defined in the new `modeling_brand_new_bert.py` script. Next, we want to make sure that a model with a specific head layer, such as `BrandNewBertForMaskedLM` does not inherit from `BrandNewBertModel`, but rather uses `BrandNewBertModel` as a component that can be called in its forward pass to keep the level of abstraction low. Every new model requires a configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute in [`PreTrainedModel`], and thus can be accessed via the `config` attribute for all classes inheriting from `BrandNewBertPreTrainedModel`: ```python model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") model.config # model has access to its config ``` Similar to the model, the configuration inherits basic serialization and deserialization functionalities from [`PretrainedConfig`]. Note that the configuration and the model are always serialized into two different formats - the model to a *pytorch_model.bin* file and the configuration to a *config.json* file. Calling the model's [`~PreTrainedModel.save_pretrained`] will automatically call the config's [`~PretrainedConfig.save_pretrained`], so that both model and configuration are saved. ### Code style When coding your new model, keep in mind that Transformers is an opinionated library and we have a few quirks of our own regarding how code should be written :-) 1. The forward pass of your model should be fully written in the modeling file while being fully independent of other models in the library. If you want to reuse a block from another model, copy the code and paste it with a `# Copied from` comment on top (see [here](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) for a good example and [there](pr_checks#check-copies) for more documentation on Copied from). 2. The code should be fully understandable, even by a non-native English speaker. This means you should pick descriptive variable names and avoid abbreviations. As an example, `activation` is preferred to `act`. One-letter variable names are strongly discouraged unless it's an index in a for loop. 3. More generally we prefer longer explicit code to short magical one. 4. 
Avoid subclassing `nn.Sequential` in PyTorch but subclass `nn.Module` and write the forward pass, so that anyone using your code can quickly debug it by adding print statements or breaking points. 5. Your function signature should be type-annotated. For the rest, good variable names are way more readable and understandable than type annotations. ### Overview of tokenizers Not quite ready yet :-( This section will be added soon! ## Step-by-step recipe to add a model to 🤗 Transformers Everyone has different preferences of how to port a model so it can be very helpful for you to take a look at summaries of how other contributors ported models to Hugging Face. Here is a list of community blog posts on how to port a model: 1. [Porting GPT2 Model](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) by [Thomas](https://huggingface.co/thomwolf) 2. [Porting WMT19 MT Model](https://huggingface.co/blog/porting-fsmt) by [Stas](https://huggingface.co/stas) From experience, we can tell you that the most important things to keep in mind when adding a model are: - Don't reinvent the wheel! Most parts of the code you will add for the new 🤗 Transformers model already exist somewhere in 🤗 Transformers. Take some time to find similar, already existing models and tokenizers you can copy from. [grep](https://www.gnu.org/software/grep/) and [rg](https://github.com/BurntSushi/ripgrep) are your friends. Note that it might very well happen that your model's tokenizer is based on one model implementation, and your model's modeling code on another one. *E.g.* FSMT's modeling code is based on BART, while FSMT's tokenizer code is based on XLM. - It's more of an engineering challenge than a scientific challenge. You should spend more time creating an efficient debugging environment rather than trying to understand all theoretical aspects of the model in the paper. - Ask for help, when you're stuck! Models are the core component of 🤗 Transformers so we at Hugging Face are more than happy to help you at every step to add your model. Don't hesitate to ask if you notice you are not making progress. In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers. The following list is a summary of everything that has to be done to add a model and can be used by you as a To-Do List: ☐ (Optional) Understood the model's theoretical aspects<br> ☐ Prepared 🤗 Transformers dev environment<br> ☐ Set up debugging environment of the original repository<br> ☐ Created script that successfully runs the `forward()` pass using the original repository and checkpoint<br> ☐ Successfully added the model skeleton to 🤗 Transformers<br> ☐ Successfully converted original checkpoint to 🤗 Transformers checkpoint<br> ☐ Successfully ran `forward()` pass in 🤗 Transformers that gives identical output to original checkpoint<br> ☐ Finished model tests in 🤗 Transformers<br> ☐ Successfully added tokenizer in 🤗 Transformers<br> ☐ Run end-to-end integration tests<br> ☐ Finished docs<br> ☐ Uploaded model weights to the Hub<br> ☐ Submitted the pull request<br> ☐ (Optional) Added a demo notebook To begin with, we usually recommend starting by getting a good theoretical understanding of `BrandNewBert`. However, if you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive into the `BrandNewBert`'s code-base. 
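To make the code-style points above concrete, here is a minimal sketch of the preferred shape: an explicit `nn.Module` with a typed, readable forward pass rather than an `nn.Sequential` one-liner (the layer names and sizes are placeholders, not part of any real model):

```python
import torch
from torch import nn


class BrandNewBertIntermediate(nn.Module):
    def __init__(self, hidden_size: int, intermediate_size: int):
        super().__init__()
        self.dense = nn.Linear(hidden_size, intermediate_size)
        self.activation = nn.GELU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Each step is explicit, so it is easy to add print statements or breakpoints.
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
```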
This option might suit you better if your engineering skills are better than your theoretical skill, if you have trouble understanding `BrandNewBert`'s paper, or if you just enjoy programming much more than reading scientific papers. ### 1. (Optional) Theoretical aspects of BrandNewBert You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large sections of the paper that are difficult to understand. If this is the case, this is fine - don't worry! The goal is not to get a deep theoretical understanding of the paper, but to extract the necessary information required to effectively re-implement the model in 🤗 Transformers. That being said, you don't have to spend too much time on the theoretical aspects, but rather focus on the practical ones, namely: - What type of model is *brand_new_bert*? BERT-like encoder-only model? GPT2-like decoder-only model? BART-like encoder-decoder model? Look at the [model_summary](model_summary) if you're not familiar with the differences between those. - What are the applications of *brand_new_bert*? Text classification? Text generation? Seq2Seq tasks, *e.g.,* summarization? - What is the novel feature of the model that makes it different from BERT/GPT-2/BART? - Which of the already existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar to *brand_new_bert*? - What type of tokenizer is used? A sentencepiece tokenizer? Word piece tokenizer? Is it the same tokenizer as used for BERT or BART? After you feel like you have gotten a good overview of the architecture of the model, you might want to write to the Hugging Face team with any questions you might have. This might include questions regarding the model's architecture, its attention layer, etc. We will be more than happy to help you. ### 2. Next prepare your environment 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the ‘Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your `transformers` fork to your local disk, and add the base repository as a remote: ```bash git clone https://github.com/[your Github handle]/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Set up a development environment, for instance by running the following command: ```bash python -m venv .env source .env/bin/activate pip install -e ".[dev]" ``` Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a failure with this command. If that's the case make sure to install the Deep Learning framework you are working with (PyTorch, TensorFlow and/or Flax) then do: ```bash pip install -e ".[quality]" ``` which should be enough for most use cases. You can then return to the parent directory ```bash cd .. ``` 4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, please follow the instructions on https://pytorch.org/get-started/locally/. **Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient. 5. To port *brand_new_bert*, you will also need access to its original repository: ```bash git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` Now you have set up a development environment to port *brand_new_bert* to 🤗 Transformers. ### 3.-4. 
Run a pretrained checkpoint using the original repository At first, you will work on the original *brand_new_bert* repository. Often, the original implementation is very “researchy”. Meaning that documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement *brand_new_bert*. At Hugging Face, one of our main goals is to *make people stand on the shoulders of giants* which translates here very well into taking a working model and rewriting it to make it as **accessible, user-friendly, and beautiful** as possible. This is the number-one motivation to re-implement models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**. You should start thereby by diving into the original repository. Successfully running the official pretrained model in the original repository is often **the most difficult** step. From our experience, it is very important to spend some time getting familiar with the original code-base. You need to figure out the following: - Where to find the pretrained weights? - How to load the pretrained weights into the corresponding model? - How to run the tokenizer independently from the model? - Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions. - Be able to locate the important components of the model: Where is the model's class? Are there model sub-classes, *e.g.* EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, *e.g.* *self-attention*, *cross-attention*...? - How can you debug the model in the original environment of the repo? Do you have to add *print* statements, can you work with an interactive debugger like *ipdb*, or should you use an efficient IDE to debug the model, like PyCharm? It is very important that before you start the porting process, you can **efficiently** debug code in the original repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request in the original repository. The maintainers of this repository are most likely very happy about someone looking into their code! At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original model. We strongly advise against setting up a costly GPU environment, but simply work on a CPU both when starting to dive into the original repository and also when starting to write the 🤗 Transformers implementation of the model. Only at the very end, when the model has already been successfully ported to 🤗 Transformers, one should verify that the model also works as expected on GPU. In general, there are two possible debugging environments for running the original model - [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb) - Local python scripts. Jupyter notebooks have the advantage that they allow for cell-by-cell execution which can be helpful to better split logical components from one another and to have faster debugging cycles as intermediate results can be stored. Also, notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend you work with them. 
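One possible way (an assumption, not the only workflow) to trace a forward pass in a PyTorch-based original repository is to register forward hooks and stash every submodule's output, so the values can later be compared against the 🤗 Transformers port:

```python
import torch

captured = {}


def register_debug_hooks(model: torch.nn.Module):
    for name, module in model.named_modules():
        def hook(module, inputs, output, name=name):
            if isinstance(output, torch.Tensor):
                captured[name] = output.detach().cpu()
        module.register_forward_hook(hook)


# model = load_original_brand_new_bert("/path/to/checkpoint/")  # hypothetical loader
# register_debug_hooks(model)
# model(torch.tensor([[0, 4, 5, 2, 3, 7, 9]]))  # dummy input ids, as suggested above
# `captured` now maps submodule names to their intermediate activations
```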
The obvious disadvantage of Jupyter notebooks is that if you are not used to working with them you will have to spend some time adjusting to the new programming environment and you might not be able to use your known debugging tools anymore, like `ipdb`. For each code-base, a good first step is always to load a **small** pretrained checkpoint and to be able to reproduce a single forward pass using a dummy integer vector of input IDs as an input. Such a script could look like this (in pseudocode): ```python model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids original_output = model.predict(input_ids) ``` Next, regarding the debugging strategy, there are generally a few from which to choose from: - Decompose the original model into many small testable components and run a forward pass on each of those for verification - Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on those, and use intermediate print statements or breakpoints for verification Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code base. If the original code-base allows you to decompose the model into smaller sub-components, *e.g.* if the original code-base can easily be run in eager mode, it is usually worth the effort to do so. There are some important advantages to taking the more difficult road in the beginning: - at a later stage when comparing the original model to the Hugging Face implementation, you can verify automatically for each component individually that the corresponding component of the 🤗 Transformers implementation matches instead of relying on visual comparison via print statements - it can give you some rope to decompose the big problem of porting a model into smaller problems of just porting individual components and thus structure your work better - separating the model into logical meaningful components will help you to get a better overview of the model's design and thus to better understand the model - at a later stage those component-by-component tests help you to ensure that no regression occurs as you continue changing your code [Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration checks for ELECTRA gives a nice example of how this can be done. However, if the original code-base is very complex or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is [T5's MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) library which is very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one often relies on verifying print statements. No matter which strategy you choose, the recommended procedure is often the same that you should start to debug the starting layers first and the ending layers last. It is recommended that you retrieve the output, either by print statements or sub-component functions, of the following layers in the following order: 1. Retrieve the input IDs passed to the model 2. Retrieve the word embeddings 3. Retrieve the input of the first Transformer layer 4. Retrieve the output of the first Transformer layer 5. Retrieve the output of the following n - 1 Transformer layers 6. 
Retrieve the output of the whole BrandNewBert Model Input IDs should thereby consists of an array of integers, *e.g.* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]` The outputs of the following layers often consist of multi-dimensional float arrays and can look like this: ``` [[ [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], ..., [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], ``` We expect that every model added to 🤗 Transformers passes a couple of integration tests, meaning that the original model and the reimplemented version in 🤗 Transformers have to give the exact same output up to a precision of 0.001! Since it is normal that the exact same model written in different libraries can give a slightly different output depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives nearly the same output, they have to be almost identical. Therefore, you will certainly compare the intermediate outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of *brand_new_bert* in which case an **efficient** debugging environment of the original repository is absolutely important. Here is some advice to make your debugging environment as efficient as possible. - Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components to retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on TensorFlow print operations like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) to output intermediate values. Is the original repository written in Jax? Then make sure that the model is **not jitted** when running the forward pass, *e.g.* check-out [this link](https://github.com/google/jax/issues/196). - Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds. In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version of your model - Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that **only** calls a single forward pass, *i.e.* that is often called `predict`, `evaluate`, `forward` or `__call__`. You don't want to debug a function that calls `forward` multiple times, *e.g.* to generate text, like `autoregressive_sample`, `generate`. - Try to separate the tokenization from the model's *forward* pass. If the original repository shows examples where you have to input a string, then try to find out where in the forward call the string input is changed to input ids and start from this point. This might mean that you have to possibly write a small script yourself or change the original code so that you can directly input the ids instead of an input string. 
- Make sure that the model in your debugging setup is **not** in training mode, which often causes the model to yield random outputs due to multiple dropout layers in the model. Make sure that the forward pass in your debugging environment is **deterministic** so that the dropout layers are not used. Or use *transformers.utils.set_seed* if the old and new implementations are in the same framework. The following section gives you more specific details/tips on how you can do this for *brand_new_bert*. ### 5.-14. Port BrandNewBert to 🤗 Transformers Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers' fork: ```bash cd transformers ``` In the special case that you are adding a model whose architecture exactly matches the model architecture of an existing model you only have to add a conversion script as described in [this section](#write-a-conversion-script). In this case, you can just re-use the whole model architecture of the already existing model. Otherwise, let's start generating a new model. We recommend using the following script to add a model starting from an existing model: ```bash transformers-cli add-new-model-like ``` You will be prompted with a questionnaire to fill in the basic information of your model. **Open a Pull Request on the main huggingface/transformers repo** Before starting to adapt the automatically generated code, now is the time to open a “Work in progress (WIP)” pull request, *e.g.* “[WIP] Add *brand_new_bert*”, in 🤗 Transformers so that you and the Hugging Face team can work side-by-side on integrating the model into 🤗 Transformers. You should do the following: 1. Create a branch with a descriptive name from your main branch ```bash git checkout -b add_brand_new_bert ``` 2. Commit the automatically generated code: ```bash git add . git commit ``` 3. Fetch and rebase to current main ```bash git fetch upstream git rebase upstream/main ``` 4. Push the changes to your account using: ```bash git push -u origin a-descriptive-name-for-my-changes ``` 5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on “Pull request”. Make sure to add the GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for future changes. 6. Change the PR into a draft by clicking on “Convert to draft” on the right of the GitHub pull request web page. In the following, whenever you have made some progress, don't forget to commit your work and push it to your account so that it shows in the pull request. Additionally, you should make sure to update your work with the current main from time to time by doing: ```bash git fetch upstream git merge upstream/main ``` In general, all questions you might have regarding the model or your implementation should be asked in your PR and discussed/solved in the PR. This way, the Hugging Face team will always be notified when you are committing new code or if you have a question. It is often very helpful to point the Hugging Face team to your added code so that the Hugging Face team can efficiently understand your problem or question. To do so, you can go to the “Files changed” tab where you see all of your changes, go to a line regarding which you want to ask a question, and click on the “+” symbol to add a comment. Whenever a question or problem has been solved, you can click on the “Resolve” button of the created comment. In the same way, the Hugging Face team will open comments when reviewing your code. 
We recommend asking most questions on GitHub on your PR. For some very general questions that are not very useful for the public, feel free to ping the Hugging Face team by Slack or email. **5. Adapt the generated models code for brand_new_bert** At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. Now you can finally start coding :). The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will either have the same architecture as BERT if it's an encoder-only model or BART if it's an encoder-decoder model. At this point, you should remind yourself what you've learned in the beginning about the theoretical aspects of the model: *How is the model different from BERT or BART?*". Implement those changes which often means changing the *self-attention* layer, the order of the normalization layer, etc… Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented. **Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first *unclean*, copy-pasted version of the original code to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel like all the necessary code is added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct the code iteratively with the conversion script as described in the next section. The only thing that has to work at this point is that you can instantiate the 🤗 Transformers implementation of *brand_new_bert*, *i.e.* the following command should work: ```python from transformers import BrandNewBertModel, BrandNewBertConfig model = BrandNewBertModel(BrandNewBertConfig()) ``` The above command will create a model according to the default parameters as defined in `BrandNewBertConfig()` with random weights, thus making sure that the `init()` methods of all components works. Note that all random initialization should happen in the `_init_weights` method of your `BrandnewBertPreTrainedModel` class. It should initialize all leaf modules depending on the variables of the config. Here is an example with the BERT `_init_weights` method: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) ``` You can have some more custom schemes if you need a special initialization for some modules. For instance, in `Wav2Vec2ForPreTraining`, the last two linear layers need to have the initialization of the regular PyTorch `nn.Linear` but all the other ones should use an initialization as above. 
This is coded like this: ```py def _init_weights(self, module): """Initialize the weights""" if isinstance(module, Wav2Vec2ForPreTraining): module.project_hid.reset_parameters() module.project_q.reset_parameters() module.project_hid._is_hf_initialized = True module.project_q._is_hf_initialized = True elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() ``` The `_is_hf_initialized` flag is internally used to make sure we only initialize a submodule once. By setting it to `True` for `module.project_q` and `module.project_hid`, we make sure the custom initialization we did is not overridden later on, the `_init_weights` function won't be applied to them. **6. Write a conversion script** Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in the original repository to a checkpoint compatible with your just created 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch, but rather to look through already existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model that was written in the same framework as *brand_new_bert*. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don't hesitate to ask the Hugging Face team to point you to a similar already existing conversion script for your model. - If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT's conversion script [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91) - If you are porting a model from PyTorch to PyTorch, a good starting point might be BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) In the following, we'll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel` as follows: ```python from torch import nn class SimpleModel(nn.Module): def __init__(self): super().__init__() self.dense = nn.Linear(10, 10) self.intermediate = nn.Linear(10, 10) self.layer_norm = nn.LayerNorm(10) ``` Now we can create an instance of this model definition which will fill all weights: `dense`, `intermediate`, `layer_norm` with random weights. We can print the model to see its architecture ```python model = SimpleModel() print(model) ``` This will print out the following: ``` SimpleModel( (dense): Linear(in_features=10, out_features=10, bias=True) (intermediate): Linear(in_features=10, out_features=10, bias=True) (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) ) ``` We can see that the layer names are defined by the name of the class attribute in PyTorch. 
You can print out the weight values of a specific layer: ```python print(model.dense.weight.data) ``` to see that the weights were randomly initialized ``` tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, -0.2077, 0.2157], [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, 0.2166, -0.0212], [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, -0.1023, -0.0447], [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, -0.1876, -0.2467], [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, 0.2577, 0.0402], [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, 0.2132, 0.1680], [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, 0.2707, -0.2509], [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, 0.1829, -0.1568], [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, 0.0333, -0.0536], [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, 0.2220, 0.2358]]). ``` In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.* ```python # retrieve matching layer weights, e.g. by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert statements for the shape and print out the names of the checkpoints weights. E.g. you should add statements like: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` Besides, you should also print out the names of both weights to make sure they match, *e.g.* ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation. An incorrect shape is most likely due to an incorrect setting of the config parameters in `BrandNewBertConfig()` that do not exactly match those that were used for the checkpoint you want to convert. However, it could also be that PyTorch's implementation of a layer requires the weight to be transposed beforehand. Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal, that the conversion trials fail with either a wrong shape statement or a wrong name assignment. This is most likely because either you used incorrect parameters in `BrandNewBertConfig()`, have a wrong architecture in the 🤗 Transformers implementation, you have a bug in the `init()` functions of one of the components of the 🤗 Transformers implementation or you need to transpose one of the checkpoint weights. This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded in the Transformers model. 
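To make these checks concrete, here is a minimal sketch of the core loading loop. It assumes that `checkpoint_weights` is a plain dict mapping weight names (already translated to the 🤗 Transformers naming scheme) to NumPy arrays; how you build that dict, and whether some weights need to be transposed first, depends entirely on the original repository.

```python
import torch


def load_original_weights(model, checkpoint_weights):
    """Copy `name -> numpy array` entries into the randomly initialized 🤗 Transformers model."""
    model_state = dict(model.named_parameters())
    initialized = set()
    for name, array in checkpoint_weights.items():
        assert name in model_state, f"{name} not found in the 🤗 Transformers model"
        tensor = torch.from_numpy(array)
        assert model_state[name].shape == tensor.shape, (
            f"Shape mismatch for {name}: checkpoint {tuple(tensor.shape)} "
            f"vs model {tuple(model_state[name].shape)}"
        )
        print(f"Initialize PyTorch weight {name}")
        with torch.no_grad():
            model_state[name].copy_(tensor)
        initialized.add(name)
    # every weight of the 🤗 Transformers model should be initialized exactly once
    missing = set(model_state) - initialized
    assert not missing, f"Weights not initialized from the checkpoint: {missing}"
```

Real conversion scripts in the library usually also rename keys and transpose certain weights, so looking at an existing conversion script for a similar architecture remains the best starting point.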
Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save the model under a folder of your choice `/path/to/converted/checkpoint/folder` that should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```

**7. Implement the forward pass**

Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#3-4-run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script using the 🤗 Transformers implementation instead of the original one. It should look as follows:

```python
import torch

model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])  # same hard-coded ids as in the original script
output = model(input_ids).last_hidden_state
```

It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact same output the very first time or that the forward pass throws an error. Don't be disappointed - it's expected! First, you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are used, leading to a *Dimensionality mismatch* error, or that the wrong data type is used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help if you don't manage to solve certain errors.

The final part to make sure the 🤗 Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `1e-3`. First, you should ensure that the output shapes are identical, *i.e.* `outputs.shape` should yield the same value for the script of the 🤗 Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This is one of the most difficult parts of adding a new model. Common reasons why the outputs are not identical are:

- Some layers were not added, *i.e.* an *activation* layer was not added, or the residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass. To fix this, make sure *model.training* is *False* and that no dropout layer is falsely activated during the forward pass, *i.e.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)

The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side-by-side and check if there are any differences. Ideally, you should debug/print out intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗 Transformers implementation shows a different output than the original implementation. First, make sure that the hard-coded `input_ids` in both scripts are identical. Next, verify that the outputs of the first transformation of the `input_ids` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network.
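In practice, it helps to wrap this layer-by-layer comparison in a small helper. The sketch below assumes you have already retrieved the corresponding intermediate output from the original repository (for example via print statements or sub-component functions) and stored it in `original_embeddings`:

```python
import torch


def compare(name, original, ours, atol=1e-3):
    """Compare an intermediate output of the original model with the 🤗 Transformers one."""
    original = torch.as_tensor(original).float()
    ours = torch.as_tensor(ours).float()
    assert original.shape == ours.shape, f"{name}: shape {tuple(original.shape)} vs {tuple(ours.shape)}"
    max_diff = (original - ours).abs().max().item()
    print(f"{name}: max absolute difference {max_diff:.2e}")
    assert torch.allclose(original, ours, atol=atol), f"{name} differs by more than {atol}"


# start with the word embeddings and work your way up, layer by layer
hf_embeddings = model.get_input_embeddings()(torch.as_tensor(input_ids).reshape(1, -1))
compare("word embeddings", original_embeddings, hf_embeddings)
```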
At some point, you will notice a difference between the two implementations, which should point you to the bug in the 🤗 Transformers implementation. From our experience, a simple and efficient way is to add many print statements in both the original implementation and the 🤗 Transformers implementation, at the same positions in the network respectively, and to successively remove print statements showing the same values for intermediate representations.

When you're confident that both implementations yield the same output, verify this with `torch.allclose(original_output, output, atol=1e-3)`, and you're done with the most difficult part! Congratulations - the work left to be done should be a cakewalk 😊.

**8. Adding all necessary model tests**

At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass:

```bash
pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py
```

Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that

- a) The community can easily understand your work by looking at specific tests of *brand_new_bert*
- b) Future changes to your model will not break any important feature of the model.

At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model in 🤗 Transformers. A template of those model tests has already been added by the Cookiecutter, called `BrandNewBertModelIntegrationTests`, and only has to be filled out by you. To ensure that those tests are passing, run

```bash
RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
```

<Tip>

In case you are using Windows, you should replace `RUN_SLOW=1` with `SET RUN_SLOW=1`

</Tip>

Second, all features that are special to *brand_new_bert* should be tested additionally in a separate test under `BrandNewBertModelTester`/`BrandNewBertModelTest`. This part is often forgotten but is extremely useful in two ways:

- It helps to transfer the knowledge you have acquired during the model addition to the community by showing how the special features of *brand_new_bert* should work.
- Future contributors can quickly test changes to the model by running those special tests.

**9. Implement the tokenizer**

Next, we should add the tokenizer of *brand_new_bert*. Usually, the tokenizer is equivalent to or very similar to an already existing tokenizer of 🤗 Transformers.

It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗 Transformers' implementation of the tokenizer.

To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository that inputs a string and returns the `input_ids`. It could look similar to this (in pseudo-code):

```python
input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` You might have to take a deeper look again into the original repository to find the correct tokenizer function or you might even have to do changes to your clone of the original repository to only output the `input_ids`. Having written a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be created. It should look similar to this: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") input_ids = tokenizer(input_str).input_ids ``` When both `input_ids` yield the same values, as a final step a tokenizer test file should also be added. Analogous to the modeling test files of *brand_new_bert*, the tokenization test files of *brand_new_bert* should contain a couple of hard-coded integration tests. **10. Run End-to-end integration tests** Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can include *e.g.* a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc… If none of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a final step to ensure that the model is fully functional, it is advised that you also run all tests on GPU. It can happen that you forgot to add some `.to(self.device)` statements to internal tensors of the model, which in such a test would show in an error. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you. **11. Add Docstring** Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `docs/source/model_doc/brand_new_bert.md` that you should fill out. Users of your model will usually first look at this page before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team regarding the docstrings. Next, make sure that the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and included all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format [here](writing-documentation). It is always good to remind oneself that documentation should be treated at least as carefully as the code in 🤗 Transformers since the documentation is usually the first contact point of the community with the model. **Code refactor** Great, now you have added all the necessary code for *brand_new_bert*. 
At this point, you should correct any potentially incorrect code style by running:

```bash
make style
```

and verify that your coding style passes the quality check:

```bash
make quality
```

There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which will show up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you're stuck here.

Lastly, it is always a good idea to refactor one's code after having ensured that the code works correctly. With all tests passing, now it's a good time to go over the added code again and do some refactoring.

You have now finished the coding part, congratulations! 🎉 You are awesome! 😎

**12. Upload the models to the model hub**

In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our [Model sharing and uploading Page](model_sharing). You should work alongside the Hugging Face team here to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the author's organization of *brand_new_bert*. The `push_to_hub` method, present in all models in `transformers`, is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below:

```python
brand_new_bert.push_to_hub("brand_new_bert")
# Uncomment the following line to push to an organization.
# brand_new_bert.push_to_hub("<organization>/brand_new_bert")
```

It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the specific characteristics of this particular checkpoint, *e.g.*: On which dataset was the checkpoint pretrained/fine-tuned? On what downstream task should the model be used? They should also include some code on how to correctly use the model.

**13. (Optional) Add notebook**

It is very helpful to add a notebook that showcases in detail how *brand_new_bert* can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.

**14. Submit your finished PR**

You're done programming now and can move to the last step, which is getting your PR merged into main. Usually, the Hugging Face team should have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and add comments to your code if you want to point out certain design choices to your reviewer.

### Share your work!!

Now, it's time to get some credit from the community for your work! Having completed a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievements with the community.

**You have made another model that is super easy to access for everyone in the community! 🤯**
transformers/docs/source/en/add_new_model.md/0
{ "file_path": "transformers/docs/source/en/add_new_model.md", "repo_id": "transformers", "token_count": 14006 }
445
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Use tokenizers from 🤗 Tokenizers The [`PreTrainedTokenizerFast`] depends on the [🤗 Tokenizers](https://huggingface.co/docs/tokenizers) library. The tokenizers obtained from the 🤗 Tokenizers library can be loaded very simply into 🤗 Transformers. Before getting in the specifics, let's first start by creating a dummy tokenizer in a few lines: ```python >>> from tokenizers import Tokenizer >>> from tokenizers.models import BPE >>> from tokenizers.trainers import BpeTrainer >>> from tokenizers.pre_tokenizers import Whitespace >>> tokenizer = Tokenizer(BPE(unk_token="[UNK]")) >>> trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]) >>> tokenizer.pre_tokenizer = Whitespace() >>> files = [...] >>> tokenizer.train(files, trainer) ``` We now have a tokenizer trained on the files we defined. We can either continue using it in that runtime, or save it to a JSON file for future re-use. ## Loading directly from the tokenizer object Let's see how to leverage this tokenizer object in the 🤗 Transformers library. The [`PreTrainedTokenizerFast`] class allows for easy instantiation, by accepting the instantiated *tokenizer* object as an argument: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer) ``` This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to [the tokenizer page](main_classes/tokenizer) for more information. ## Loading from a JSON file In order to load a tokenizer from a JSON file, let's first start by saving our tokenizer: ```python >>> tokenizer.save("tokenizer.json") ``` The path to which we saved this file can be passed to the [`PreTrainedTokenizerFast`] initialization method using the `tokenizer_file` parameter: ```python >>> from transformers import PreTrainedTokenizerFast >>> fast_tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` This object can now be used with all the methods shared by the 🤗 Transformers tokenizers! Head to [the tokenizer page](main_classes/tokenizer) for more information.
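As a quick sanity check (the exact tokens and ids depend on the files the tokenizer was trained on earlier, so no outputs are shown), you can encode a sentence with the wrapped tokenizer:

```python
>>> encoding = fast_tokenizer("Using a fast tokenizer from 🤗 Tokenizers")
>>> print(encoding.tokens())
>>> print(encoding.input_ids)
```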
transformers/docs/source/en/fast_tokenizers.md/0
{ "file_path": "transformers/docs/source/en/fast_tokenizers.md", "repo_id": "transformers", "token_count": 792 }
446
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Optimization The `.optimization` module provides: - an optimizer with weight decay fixed that can be used to fine-tuned models, and - several schedules in the form of schedule objects that inherit from `_LRSchedule`: - a gradient accumulation class to accumulate the gradients of multiple batches ## AdamW (PyTorch) [[autodoc]] AdamW ## AdaFactor (PyTorch) [[autodoc]] Adafactor ## AdamWeightDecay (TensorFlow) [[autodoc]] AdamWeightDecay [[autodoc]] create_optimizer ## Schedules ### Learning Rate Schedules (Pytorch) [[autodoc]] SchedulerType [[autodoc]] get_scheduler [[autodoc]] get_constant_schedule [[autodoc]] get_constant_schedule_with_warmup <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png"/> [[autodoc]] get_cosine_schedule_with_warmup <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png"/> [[autodoc]] get_cosine_with_hard_restarts_schedule_with_warmup <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png"/> [[autodoc]] get_linear_schedule_with_warmup <img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png"/> [[autodoc]] get_polynomial_decay_schedule_with_warmup [[autodoc]] get_inverse_sqrt_schedule [[autodoc]] get_wsd_schedule ### Warmup (TensorFlow) [[autodoc]] WarmUp ## Gradient Strategies ### GradientAccumulator (TensorFlow) [[autodoc]] GradientAccumulator
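As a rough PyTorch sketch of how an optimizer and a schedule are combined in a training loop (assuming `model` and `dataloader` are already defined and each batch contains labels so the model returns a loss):

```python
from transformers import AdamW, get_linear_schedule_with_warmup

optimizer = AdamW(model.parameters(), lr=5e-5, weight_decay=0.01)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=100, num_training_steps=1000
)

for batch in dataloader:
    loss = model(**batch).loss
    loss.backward()
    optimizer.step()
    lr_scheduler.step()  # update the learning rate after each optimization step
    optimizer.zero_grad()
```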
transformers/docs/source/en/main_classes/optimizer_schedules.md/0
{ "file_path": "transformers/docs/source/en/main_classes/optimizer_schedules.md", "repo_id": "transformers", "token_count": 761 }
447
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BARThez ## Overview The BARThez model was proposed in [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis on 23 Oct, 2020. The abstract of the paper: *Inductive transfer learning, enabled by self-supervised learning, have taken the entire Natural Language Processing (NLP) field by storm, with models such as BERT and BART setting new state of the art on countless natural language understanding tasks. While there are some notable exceptions, most of the available models and research have been conducted for the English language. In this work, we introduce BARThez, the first BART model for the French language (to the best of our knowledge). BARThez was pretrained on a very large monolingual French corpus from past research that we adapted to suit BART's perturbation schemes. Unlike already existing BERT-based French language models such as CamemBERT and FlauBERT, BARThez is particularly well-suited for generative tasks, since not only its encoder but also its decoder is pretrained. In addition to discriminative tasks from the FLUE benchmark, we evaluate BARThez on a novel summarization dataset, OrangeSum, that we release with this paper. We also continue the pretraining of an already pretrained multilingual BART on BARThez's corpus, and we show that the resulting model, which we call mBARTHez, provides a significant boost over vanilla BARThez, and is on par with or outperforms CamemBERT and FlauBERT.* This model was contributed by [moussakam](https://huggingface.co/moussakam). The Authors' code can be found [here](https://github.com/moussaKam/BARThez). <Tip> BARThez implementation is the same as BART, except for tokenization. Refer to [BART documentation](bart) for information on configuration classes and their parameters. BARThez-specific tokenizers are documented below. </Tip> ## Resources - BARThez can be fine-tuned on sequence-to-sequence tasks in a similar way as BART, check: [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md). ## BarthezTokenizer [[autodoc]] BarthezTokenizer ## BarthezTokenizerFast [[autodoc]] BarthezTokenizerFast
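For instance, loading and using the tokenizer could look like this (a short sketch; the `moussakam/barthez` checkpoint name is taken from the original release and may need adjusting):

```python
from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussakam/barthez")
input_ids = tokenizer("Ceci est un exemple en français.", return_tensors="pt").input_ids
```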
transformers/docs/source/en/model_doc/barthez.md/0
{ "file_path": "transformers/docs/source/en/model_doc/barthez.md", "repo_id": "transformers", "token_count": 818 }
448
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BORT <Tip warning={true}> This model is in maintenance mode only, we do not accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0. You can do so by running the following command: `pip install -U transformers==4.30.0`. </Tip> ## Overview The BORT model was proposed in [Optimal Subarchitecture Extraction for BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry. It is an optimal subset of architectural parameters for the BERT, which the authors refer to as "Bort". The abstract from the paper is the following: *We extract an optimal subset of architectural parameters for the BERT architecture from Devlin et al. (2018) by applying recent breakthroughs in algorithms for neural architecture search. This optimal subset, which we refer to as "Bort", is demonstrably smaller, having an effective (that is, not counting the embedding layer) size of 5.5% the original BERT-large architecture, and 16% of the net size. Bort is also able to be pretrained in 288 GPU hours, which is 1.2% of the time required to pretrain the highest-performing BERT parametric architectural variant, RoBERTa-large (Liu et al., 2019), and about 33% of that of the world-record, in GPU hours, required to train BERT-large on the same hardware. It is also 7.9x faster on a CPU, as well as being better performing than other compressed variants of the architecture, and some of the non-compressed variants: it obtains performance improvements of between 0.3% and 31%, absolute, with respect to BERT-large, on multiple public natural language understanding (NLU) benchmarks.* This model was contributed by [stefan-it](https://huggingface.co/stefan-it). The original code can be found [here](https://github.com/alexa/bort/). ## Usage tips - BORT's model architecture is based on BERT, refer to [BERT's documentation page](bert) for the model's API reference as well as usage examples. - BORT uses the RoBERTa tokenizer instead of the BERT tokenizer, refer to [RoBERTa's documentation page](roberta) for the tokenizer's API reference as well as usage examples. - BORT requires a specific fine-tuning algorithm, called [Agora](https://adewynter.github.io/notes/bort_algorithms_and_applications.html#fine-tuning-with-algebraic-topology) , that is sadly not open-sourced yet. It would be very useful for the community, if someone tries to implement the algorithm to make BORT fine-tuning work.
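Putting the first two tips together, a minimal inference sketch could look like this. The checkpoint name `amazon/bort` is an assumption based on the authors' release; as noted above, fall back to `transformers==4.30.0` if you run into issues.

```python
from transformers import BertModel, RobertaTokenizer

# BORT reuses BERT's architecture with RoBERTa's tokenizer, as described in the tips above;
# the checkpoint name below is assumed from the authors' release
tokenizer = RobertaTokenizer.from_pretrained("amazon/bort")
model = BertModel.from_pretrained("amazon/bort")

inputs = tokenizer("BORT is a compressed variant of BERT.", return_tensors="pt")
outputs = model(**inputs)
```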
transformers/docs/source/en/model_doc/bort.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bort.md", "repo_id": "transformers", "token_count": 867 }
449
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ConvNeXT ## Overview The ConvNeXT model was proposed in [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. ConvNeXT is a pure convolutional model (ConvNet), inspired by the design of Vision Transformers, that claims to outperform them. The abstract from the paper is the following: *The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.jpg" alt="drawing" width="600"/> <small> ConvNeXT architecture. Taken from the <a href="https://arxiv.org/abs/2201.03545">original paper</a>.</small> This model was contributed by [nielsr](https://huggingface.co/nielsr). TensorFlow version of the model was contributed by [ariG23498](https://github.com/ariG23498), [gante](https://github.com/gante), and [sayakpaul](https://github.com/sayakpaul) (equal contribution). The original code can be found [here](https://github.com/facebookresearch/ConvNeXt). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ConvNeXT. 
<PipelineTag pipeline="image-classification"/> - [`ConvNextForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ConvNextConfig [[autodoc]] ConvNextConfig ## ConvNextFeatureExtractor [[autodoc]] ConvNextFeatureExtractor ## ConvNextImageProcessor [[autodoc]] ConvNextImageProcessor - preprocess <frameworkcontent> <pt> ## ConvNextModel [[autodoc]] ConvNextModel - forward ## ConvNextForImageClassification [[autodoc]] ConvNextForImageClassification - forward </pt> <tf> ## TFConvNextModel [[autodoc]] TFConvNextModel - call ## TFConvNextForImageClassification [[autodoc]] TFConvNextForImageClassification - call </tf> </frameworkcontent>
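For a quick start with a pretrained checkpoint, image classification can also be run through the pipeline API. A minimal sketch using the `facebook/convnext-tiny-224` checkpoint:

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="facebook/convnext-tiny-224")
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
print(predictions[0])  # top label and score for the image
```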
transformers/docs/source/en/model_doc/convnext.md/0
{ "file_path": "transformers/docs/source/en/model_doc/convnext.md", "repo_id": "transformers", "token_count": 1215 }
450
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DETR ## Overview The DETR model was proposed in [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov and Sergey Zagoruyko. DETR consists of a convolutional backbone followed by an encoder-decoder Transformer which can be trained end-to-end for object detection. It greatly simplifies a lot of the complexity of models like Faster-R-CNN and Mask-R-CNN, which use things like region proposals, non-maximum suppression procedure and anchor generation. Moreover, DETR can also be naturally extended to perform panoptic segmentation, by simply adding a mask head on top of the decoder outputs. The abstract from the paper is the following: *We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/detr). ## How DETR works Here's a TLDR explaining how [`~transformers.DetrForObjectDetection`] works: First, an image is sent through a pre-trained convolutional backbone (in the paper, the authors use ResNet-50/ResNet-101). Let's assume we also add a batch dimension. This means that the input to the backbone is a tensor of shape `(batch_size, 3, height, width)`, assuming the image has 3 color channels (RGB). The CNN backbone outputs a new lower-resolution feature map, typically of shape `(batch_size, 2048, height/32, width/32)`. This is then projected to match the hidden dimension of the Transformer of DETR, which is `256` by default, using a `nn.Conv2D` layer. 
So now, we have a tensor of shape `(batch_size, 256, height/32, width/32).` Next, the feature map is flattened and transposed to obtain a tensor of shape `(batch_size, seq_len, d_model)` = `(batch_size, width/32*height/32, 256)`. So a difference with NLP models is that the sequence length is actually longer than usual, but with a smaller `d_model` (which in NLP is typically 768 or higher). Next, this is sent through the encoder, outputting `encoder_hidden_states` of the same shape (you can consider these as image features). Next, so-called **object queries** are sent through the decoder. This is a tensor of shape `(batch_size, num_queries, d_model)`, with `num_queries` typically set to 100 and initialized with zeros. These input embeddings are learnt positional encodings that the authors refer to as object queries, and similarly to the encoder, they are added to the input of each attention layer. Each object query will look for a particular object in the image. The decoder updates these embeddings through multiple self-attention and encoder-decoder attention layers to output `decoder_hidden_states` of the same shape: `(batch_size, num_queries, d_model)`. Next, two heads are added on top for object detection: a linear layer for classifying each object query into one of the objects or "no object", and a MLP to predict bounding boxes for each query. The model is trained using a **bipartite matching loss**: so what we actually do is compare the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a "no object" as class and "no bounding box" as bounding box). The [Hungarian matching algorithm](https://en.wikipedia.org/wiki/Hungarian_algorithm) is used to find an optimal one-to-one mapping of each of the N queries to each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and [generalized IoU loss](https://giou.stanford.edu/) (for the bounding boxes) are used to optimize the parameters of the model. DETR can be naturally extended to perform panoptic segmentation (which unifies semantic segmentation and instance segmentation). [`~transformers.DetrForSegmentation`] adds a segmentation mask head on top of [`~transformers.DetrForObjectDetection`]. The mask head can be trained either jointly, or in a two steps process, where one first trains a [`~transformers.DetrForObjectDetection`] model to detect bounding boxes around both "things" (instances) and "stuff" (background things like trees, roads, sky), then freeze all the weights and train only the mask head for 25 epochs. Experimentally, these two approaches give similar results. Note that predicting boxes is required for the training to be possible, since the Hungarian matching is computed using distances between boxes. ## Usage tips - DETR uses so-called **object queries** to detect objects in an image. The number of queries determines the maximum number of objects that can be detected in a single image, and is set to 100 by default (see parameter `num_queries` of [`~transformers.DetrConfig`]). Note that it's good to have some slack (in COCO, the authors used 100, while the maximum number of objects in a COCO image is ~70). - The decoder of DETR updates the query embeddings in parallel. This is different from language models like GPT-2, which use autoregressive decoding instead of parallel. Hence, no causal attention mask is used. 
- DETR adds position embeddings to the hidden states at each self-attention and cross-attention layer before projecting to queries and keys. For the position embeddings of the image, one can choose between fixed sinusoidal or learned absolute position embeddings. By default, the parameter `position_embedding_type` of [`~transformers.DetrConfig`] is set to `"sine"`. - During training, the authors of DETR did find it helpful to use auxiliary losses in the decoder, especially to help the model output the correct number of objects of each class. If you set the parameter `auxiliary_loss` of [`~transformers.DetrConfig`] to `True`, then prediction feedforward neural networks and Hungarian losses are added after each decoder layer (with the FFNs sharing parameters). - If you want to train the model in a distributed environment across multiple nodes, then one should update the _num_boxes_ variable in the _DetrLoss_ class of _modeling_detr.py_. When training on multiple nodes, this should be set to the average number of target boxes across all nodes, as can be seen in the original implementation [here](https://github.com/facebookresearch/detr/blob/a54b77800eb8e64e3ad0d8237789fcbf2f8350c5/models/detr.py#L227-L232). - [`~transformers.DetrForObjectDetection`] and [`~transformers.DetrForSegmentation`] can be initialized with any convolutional backbone available in the [timm library](https://github.com/rwightman/pytorch-image-models). Initializing with a MobileNet backbone for example can be done by setting the `backbone` attribute of [`~transformers.DetrConfig`] to `"tf_mobilenetv3_small_075"`, and then initializing the model with that config. - DETR resizes the input images such that the shortest side is at least a certain amount of pixels while the longest is at most 1333 pixels. At training time, scale augmentation is used such that the shortest side is randomly set to at least 480 and at most 800 pixels. At inference time, the shortest side is set to 800. One can use [`~transformers.DetrImageProcessor`] to prepare images (and optional annotations in COCO format) for the model. Due to this resizing, images in a batch can have different sizes. DETR solves this by padding images up to the largest size in a batch, and by creating a pixel mask that indicates which pixels are real/which are padding. Alternatively, one can also define a custom `collate_fn` in order to batch images together, using [`~transformers.DetrImageProcessor.pad_and_create_pixel_mask`]. - The size of the images will determine the amount of memory being used, and will thus determine the `batch_size`. It is advised to use a batch size of 2 per GPU. See [this Github thread](https://github.com/facebookresearch/detr/issues/150) for more info. 
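To make the image preparation and post-processing described in the tips above concrete, here is a minimal inference sketch. It is only an illustration: the `facebook/detr-resnet-50` checkpoint, the COCO sample image URL and the 0.9 confidence threshold are convenient defaults, and any other DETR object-detection checkpoint should work the same way.

```python
import torch
import requests
from PIL import Image
from transformers import DetrImageProcessor, DetrForObjectDetection

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# resize + normalize + pad the image and build the pixel mask
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# convert the raw object-query predictions to (score, label, box) triples in Pascal VOC format,
# keeping only predictions above the confidence threshold
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} at {box.tolist()}")
```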
There are three ways to instantiate a DETR model (depending on what you prefer):

Option 1: Instantiate DETR with pre-trained weights for the entire model

```py
>>> from transformers import DetrForObjectDetection

>>> model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
```

Option 2: Instantiate DETR with randomly initialized weights for the Transformer, but pre-trained weights for the backbone

```py
>>> from transformers import DetrConfig, DetrForObjectDetection

>>> config = DetrConfig()
>>> model = DetrForObjectDetection(config)
```

Option 3: Instantiate DETR with randomly initialized weights for backbone + Transformer

```py
>>> config = DetrConfig(use_pretrained_backbone=False)
>>> model = DetrForObjectDetection(config)
```

As a summary, consider the following table:

| Task | Object detection | Instance segmentation | Panoptic segmentation |
|------|------------------|-----------------------|-----------------------|
| **Description** | Predicting bounding boxes and class labels around objects in an image | Predicting masks around objects (i.e. instances) in an image | Predicting masks around both objects (i.e. instances) as well as "stuff" (i.e. background things like trees and roads) in an image |
| **Model** | [`~transformers.DetrForObjectDetection`] | [`~transformers.DetrForSegmentation`] | [`~transformers.DetrForSegmentation`] |
| **Example dataset** | COCO detection | COCO detection, COCO panoptic | COCO panoptic |
| **Format of annotations to provide to** [`~transformers.DetrImageProcessor`] | {'image_id': `int`, 'annotations': `List[Dict]`}, each Dict being a COCO object annotation | {'image_id': `int`, 'annotations': `List[Dict]`} (in case of COCO detection) or {'file_name': `str`, 'image_id': `int`, 'segments_info': `List[Dict]`} (in case of COCO panoptic) | {'file_name': `str`, 'image_id': `int`, 'segments_info': `List[Dict]`} and masks_path (path to directory containing PNG files of the masks) |
| **Postprocessing** (i.e. converting the output of the model to Pascal VOC format) | [`~transformers.DetrImageProcessor.post_process`] | [`~transformers.DetrImageProcessor.post_process_segmentation`] | [`~transformers.DetrImageProcessor.post_process_segmentation`], [`~transformers.DetrImageProcessor.post_process_panoptic`] |
| **Evaluators** | `CocoEvaluator` with `iou_types="bbox"` | `CocoEvaluator` with `iou_types="bbox"` or `"segm"` | `CocoEvaluator` with `iou_types="bbox"` or `"segm"`, `PanopticEvaluator` |

In short, one should prepare the data either in COCO detection or COCO panoptic format, then use [`~transformers.DetrImageProcessor`] to create `pixel_values`, `pixel_mask` and optional `labels`, which can then be used to train (or fine-tune) a model. For evaluation, one should first convert the outputs of the model using one of the postprocessing methods of [`~transformers.DetrImageProcessor`]. These can be provided to either `CocoEvaluator` or `PanopticEvaluator`, which allow you to calculate metrics like mean Average Precision (mAP) and Panoptic Quality (PQ). The latter objects are implemented in the [original repository](https://github.com/facebookresearch/detr). See the [example notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR) for more info regarding evaluation.

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DETR.
<PipelineTag pipeline="object-detection"/> - All example notebooks illustrating fine-tuning [`DetrForObjectDetection`] and [`DetrForSegmentation`] on a custom dataset can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR). - Scripts for finetuning [`DetrForObjectDetection`] with [`Trainer`] or [Accelerate](https://huggingface.co/docs/accelerate/index) can be found [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/object-detection). - See also: [Object detection task guide](../tasks/object_detection). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## DetrConfig [[autodoc]] DetrConfig ## DetrImageProcessor [[autodoc]] DetrImageProcessor - preprocess - post_process_object_detection - post_process_semantic_segmentation - post_process_instance_segmentation - post_process_panoptic_segmentation ## DetrFeatureExtractor [[autodoc]] DetrFeatureExtractor - __call__ - post_process_object_detection - post_process_semantic_segmentation - post_process_instance_segmentation - post_process_panoptic_segmentation ## DETR specific outputs [[autodoc]] models.detr.modeling_detr.DetrModelOutput [[autodoc]] models.detr.modeling_detr.DetrObjectDetectionOutput [[autodoc]] models.detr.modeling_detr.DetrSegmentationOutput ## DetrModel [[autodoc]] DetrModel - forward ## DetrForObjectDetection [[autodoc]] DetrForObjectDetection - forward ## DetrForSegmentation [[autodoc]] DetrForSegmentation - forward
transformers/docs/source/en/model_doc/detr.md/0
{ "file_path": "transformers/docs/source/en/model_doc/detr.md", "repo_id": "transformers", "token_count": 4188 }
451
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ESM ## Overview This page provides code and pre-trained weights for Transformer protein language models from Meta AI's Fundamental AI Research Team, providing the state-of-the-art ESMFold and ESM-2, and the previously released ESM-1b and ESM-1v. Transformer protein language models were introduced in the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. The first version of this paper was [preprinted in 2019](https://www.biorxiv.org/content/10.1101/622803v1?versioned=true). ESM-2 outperforms all tested single-sequence protein language models across a range of structure prediction tasks, and enables atomic resolution structure prediction. It was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido and Alexander Rives. Also introduced in this paper was ESMFold. It uses an ESM-2 stem with a head that can predict folded protein structures with state-of-the-art accuracy. Unlike [AlphaFold2](https://www.nature.com/articles/s41586-021-03819-2), it relies on the token embeddings from the large pre-trained protein language model stem and does not perform a multiple sequence alignment (MSA) step at inference time, which means that ESMFold checkpoints are fully "standalone" - they do not require a database of known protein sequences and structures with associated external query tools to make predictions, and are much faster as a result. The abstract from "Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences" is *In the field of artificial intelligence, a combination of scale in data and model capacity enabled by unsupervised learning has led to major advances in representation learning and statistical generation. In the life sciences, the anticipated growth of sequencing promises unprecedented data on natural sequence diversity. Protein language modeling at the scale of evolution is a logical step toward predictive and generative artificial intelligence for biology. To this end, we use unsupervised learning to train a deep contextual language model on 86 billion amino acids across 250 million protein sequences spanning evolutionary diversity. The resulting model contains information about biological properties in its representations. The representations are learned from sequence data alone. 
The learned representation space has a multiscale organization reflecting structure from the level of biochemical properties of amino acids to remote homology of proteins. Information about secondary and tertiary structure is encoded in the representations and can be identified by linear projections. Representation learning produces features that generalize across a range of applications, enabling state-of-the-art supervised prediction of mutational effect and secondary structure and improving state-of-the-art features for long-range contact prediction.*

The abstract from "Language models of protein sequences at the scale of evolution enable accurate structure prediction" is

*Large language models have recently been shown to develop emergent capabilities with scale, going beyond simple pattern matching to perform higher level reasoning and generate lifelike images and text. While language models trained on protein sequences have been studied at a smaller scale, little is known about what they learn about biology as they are scaled up. In this work we train models up to 15 billion parameters, the largest language models of proteins to be evaluated to date. We find that as models are scaled they learn information enabling the prediction of the three-dimensional structure of a protein at the resolution of individual atoms. We present ESMFold for high accuracy end-to-end atomic level structure prediction directly from the individual sequence of a protein. ESMFold has similar accuracy to AlphaFold2 and RoseTTAFold for sequences with low perplexity that are well understood by the language model. ESMFold inference is an order of magnitude faster than AlphaFold2, enabling exploration of the structural space of metagenomic proteins in practical timescales.*

The original code can be found [here](https://github.com/facebookresearch/esm) and was developed by the Fundamental AI Research team at Meta AI. ESM-1b, ESM-1v and ESM-2 were contributed to Hugging Face by [jasonliu](https://huggingface.co/jasonliu) and [Matt](https://huggingface.co/Rocketknight1).

ESMFold was contributed to Hugging Face by [Matt](https://huggingface.co/Rocketknight1) and [Sylvain](https://huggingface.co/sgugger), with a big thank you to Nikita Smetanin, Roshan Rao and Tom Sercu for their help throughout the process!

## Usage tips

- ESM models are trained with a masked language modeling (MLM) objective.
- The HuggingFace port of ESMFold uses portions of the [openfold](https://github.com/aqlaboratory/openfold) library. The `openfold` library is licensed under the Apache License 2.0.
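Because the checkpoints are standard masked language models, extracting per-residue embeddings follows the usual Transformers pattern. The sketch below uses the small `facebook/esm2_t6_8M_UR50D` ESM-2 checkpoint for speed; any other ESM-2 checkpoint should behave the same way, and the protein sequence is just an arbitrary example.

```python
import torch
from transformers import AutoTokenizer, EsmModel

tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

# a protein sequence is passed as a plain string of amino-acid letters
sequence = "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLAGG"
inputs = tokenizer(sequence, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# one hidden-state vector per residue (plus the special tokens added at both ends)
per_residue_embeddings = outputs.last_hidden_state
print(per_residue_embeddings.shape)  # (1, sequence_length + 2, hidden_size)
```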
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Masked language modeling task guide](../tasks/masked_language_modeling) ## EsmConfig [[autodoc]] EsmConfig - all ## EsmTokenizer [[autodoc]] EsmTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary <frameworkcontent> <pt> ## EsmModel [[autodoc]] EsmModel - forward ## EsmForMaskedLM [[autodoc]] EsmForMaskedLM - forward ## EsmForSequenceClassification [[autodoc]] EsmForSequenceClassification - forward ## EsmForTokenClassification [[autodoc]] EsmForTokenClassification - forward ## EsmForProteinFolding [[autodoc]] EsmForProteinFolding - forward </pt> <tf> ## TFEsmModel [[autodoc]] TFEsmModel - call ## TFEsmForMaskedLM [[autodoc]] TFEsmForMaskedLM - call ## TFEsmForSequenceClassification [[autodoc]] TFEsmForSequenceClassification - call ## TFEsmForTokenClassification [[autodoc]] TFEsmForTokenClassification - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/esm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/esm.md", "repo_id": "transformers", "token_count": 1906 }
452
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # OpenAI GPT2 <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=gpt2"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-gpt2-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/gpt2"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview OpenAI GPT-2 model was proposed in [Language Models are Unsupervised Multitask Learners](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever from [OpenAI](https://huggingface.co/openai). It's a causal (unidirectional) transformer pretrained using language modeling on a very large corpus of ~40 GB of text data. The abstract from the paper is the following: *GPT-2 is a large transformer-based language model with 1.5 billion parameters, trained on a dataset[1] of 8 million web pages. GPT-2 is trained with a simple objective: predict the next word, given all of the previous words within some text. The diversity of the dataset causes this simple goal to contain naturally occurring demonstrations of many tasks across diverse domains. GPT-2 is a direct scale-up of GPT, with more than 10X the parameters and trained on more than 10X the amount of data.* [Write With Transformer](https://transformer.huggingface.co/doc/gpt2-large) is a webapp created and hosted by Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five different sizes: small, medium, large, xl and a distilled version of the small checkpoint: *distilgpt-2*. This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://openai.com/blog/better-language-models/). ## Usage tips - GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - GPT-2 was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as it can be observed in the *run_generation.py* example script. - The model can take the *past_key_values* (for PyTorch) or *past* (for TF) as input, which is the previously computed key/value attention pairs. Using this (*past_key_values* or *past*) value prevents the model from re-computing pre-computed values in the context of text generation. For PyTorch, see *past_key_values* argument of the [`GPT2Model.forward`] method, or for TF the *past* argument of the [`TFGPT2Model.call`] method for more information on its usage. 
- Enabling the *scale_attn_by_inverse_layer_idx* and *reorder_and_upcast_attn* flags will apply the training stability improvements from [Mistral](https://github.com/stanford-crfm/mistral/) (for PyTorch only).

## Usage example

The `generate()` method can be used to generate text using the GPT2 model.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")

>>> prompt = "GPT2 is a model developed by OpenAI."

>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids

>>> gen_tokens = model.generate(
...     input_ids,
...     do_sample=True,
...     temperature=0.9,
...     max_length=100,
... )
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0]
```

## Using Flash Attention 2

Flash Attention 2 is a faster, optimized version of the attention scores computation which relies on `cuda` kernels.

### Installation

First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [in the Bark documentation](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).

Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:

```bash
pip install -U flash-attn --no-build-isolation
```

### Usage

To load a model using Flash Attention 2, we can pass the argument `attn_implementation="flash_attention_2"` to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). We'll also load the model in half-precision (e.g. `torch.float16`), since it results in almost no degradation in generation quality while significantly lowering memory usage and speeding up inference:

```python
>>> import torch
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> device = "cuda"  # the device to load the model onto

>>> model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")

>>> prompt = "def hello_world():"

>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
>>> model.to(device)

>>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
>>> tokenizer.batch_decode(generated_ids)[0]
```

### Expected speedups

Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using the `gpt2` checkpoint and the Flash Attention 2 version of the model using a sequence length of 512.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/EduardoPacheco/documentation-images/resolve/main/gpt2_flash_attention_2_speedup.jpg">
</div>

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with GPT2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-generation"/> - A blog on how to [Finetune a non-English GPT-2 Model with Hugging Face](https://www.philschmid.de/fine-tune-a-non-english-gpt-2-model-with-huggingface). - A blog on [How to generate text: using different decoding methods for language generation with Transformers](https://huggingface.co/blog/how-to-generate) with GPT-2. - A blog on [Training CodeParrot 🦜 from Scratch](https://huggingface.co/blog/codeparrot), a large GPT-2 model. - A blog on [Faster Text Generation with TensorFlow and XLA](https://huggingface.co/blog/tf-xla-generate) with GPT-2. - A blog on [How to train a Language Model with Megatron-LM](https://huggingface.co/blog/megatron-training) with a GPT-2 model. - A notebook on how to [finetune GPT2 to generate lyrics in the style of your favorite artist](https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb). 🌎 - A notebook on how to [finetune GPT2 to generate tweets in the style of your favorite Twitter user](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb). 🌎 - [Causal language modeling](https://huggingface.co/course/en/chapter7/6?fw=pt#training-a-causal-language-model-from-scratch) chapter of the 🤗 Hugging Face Course. - [`GPT2LMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#gpt-2gpt-and-causal-language-modeling), [text generation example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation), and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFGPT2LMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_clmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [`FlaxGPT2LMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#causal-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/causal_language_modeling_flax.ipynb). 
- [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Causal language modeling task guide](../tasks/language_modeling) ## GPT2Config [[autodoc]] GPT2Config ## GPT2Tokenizer [[autodoc]] GPT2Tokenizer - save_vocabulary ## GPT2TokenizerFast [[autodoc]] GPT2TokenizerFast ## GPT2 specific outputs [[autodoc]] models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput [[autodoc]] models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput <frameworkcontent> <pt> ## GPT2Model [[autodoc]] GPT2Model - forward ## GPT2LMHeadModel [[autodoc]] GPT2LMHeadModel - forward ## GPT2DoubleHeadsModel [[autodoc]] GPT2DoubleHeadsModel - forward ## GPT2ForQuestionAnswering [[autodoc]] GPT2ForQuestionAnswering - forward ## GPT2ForSequenceClassification [[autodoc]] GPT2ForSequenceClassification - forward ## GPT2ForTokenClassification [[autodoc]] GPT2ForTokenClassification - forward </pt> <tf> ## TFGPT2Model [[autodoc]] TFGPT2Model - call ## TFGPT2LMHeadModel [[autodoc]] TFGPT2LMHeadModel - call ## TFGPT2DoubleHeadsModel [[autodoc]] TFGPT2DoubleHeadsModel - call ## TFGPT2ForSequenceClassification [[autodoc]] TFGPT2ForSequenceClassification - call ## TFSequenceClassifierOutputWithPast [[autodoc]] modeling_tf_outputs.TFSequenceClassifierOutputWithPast ## TFGPT2Tokenizer [[autodoc]] TFGPT2Tokenizer </tf> <jax> ## FlaxGPT2Model [[autodoc]] FlaxGPT2Model - __call__ ## FlaxGPT2LMHeadModel [[autodoc]] FlaxGPT2LMHeadModel - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/gpt2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gpt2.md", "repo_id": "transformers", "token_count": 3497 }
453
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLaVA-NeXT ## Overview The LLaVA-NeXT model was proposed in [LLaVA-NeXT: Improved reasoning, OCR, and world knowledge](https://llava-vl.github.io/blog/2024-01-30-llava-next/) by Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, Yong Jae Lee. LLaVa-NeXT (also called LLaVa-1.6) improves upon [LLaVa](llava) by increasing the input image resolution and training on an improved visual instruction tuning dataset to improve OCR and common sense reasoning. The introduction from the blog is the following: *In October 2023, we released LLaVA-1.5 with a simple and efficient design along with great performance on a benchmark suite of 12 datasets. It has since served as the foundation of many comprehensive studies of data, model, and capabilities of large multimodal models (LMM), and has enabled various new applications. Today, we are thrilled to present LLaVA-NeXT, with improved reasoning, OCR, and world knowledge. LLaVA-NeXT even exceeds Gemini Pro on several benchmarks. Compared with LLaVA-1.5, LLaVA-NeXT has several improvements: Increasing the input image resolution to 4x more pixels. This allows it to grasp more visual details. It supports three aspect ratios, up to 672x672, 336x1344, 1344x336 resolution. Better visual reasoning and OCR capability with an improved visual instruction tuning data mixture. Better visual conversation for more scenarios, covering different applications. Better world knowledge and logical reasoning. Efficient deployment and inference with SGLang. Along with performance improvements, LLaVA-NeXT maintains the minimalist design and data efficiency of LLaVA-1.5. It re-uses the pretrained connector of LLaVA-1.5, and still uses less than 1M visual instruction tuning samples. The largest 34B variant finishes training in ~1 day with 32 A100s.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llava_next_overview.png" alt="drawing" width="600"/> <small> LLaVa-NeXT incorporates a higher input resolution by encoding various patches of the input image. Taken from the <a href="https://arxiv.org/abs/2310.03744">original paper.</a> </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/main). ## Usage tips - We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating. - Note that each checkpoint has been trained with a specific prompt format, depending on which large language model (LLM) was used. 
Below, we list the correct prompt formats to use for the text prompt "What is shown in this image?": [llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) requires the following format: ```bash "[INST] <image>\nWhat is shown in this image? [/INST]" ``` [llava-v1.6-vicuna-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-vicuna-7b-hf) and [llava-v1.6-vicuna-13b-hf](https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf) require the following format: ```bash "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:" ``` [llava-v1.6-34b-hf](https://huggingface.co/llava-hf/llava-v1.6-34b-hf) requires the following format: ```bash "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n" ``` ## Usage example Here's how to load the model and perform inference in half-precision (`torch.float16`): ```python from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration import torch from PIL import Image import requests processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf") model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True) model.to("cuda:0") # prepare image and text prompt, using the appropriate prompt template url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true" image = Image.open(requests.get(url, stream=True).raw) prompt = "[INST] <image>\nWhat is shown in this image? [/INST]" inputs = processor(prompt, image, return_tensors="pt").to("cuda:0") # autoregressively complete prompt output = model.generate(**inputs, max_new_tokens=100) print(processor.decode(output[0], skip_special_tokens=True)) ``` ## Model optimization ### Quantization using Bitsandbytes The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes` and make sure to have access to a CUDA compatible GPU device. Simply change the snippet above with: ```python from transformers import LlavaNextForConditionalGeneration, BitsAndBytesConfig # specify how to quantize the model quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", quantization_config=quantization_config, device_map="auto") ``` ### Use Flash-Attention 2 to further speed-up generation First make sure to install flash-attn. Refer to the [original repository of Flash Attention](https://github.com/Dao-AILab/flash-attention) regarding that package installation. Simply change the snippet above with: ```python from transformers import LlavaNextForConditionalGeneration model = LlavaNextForConditionalGeneration.from_pretrained( model_id, torch_dtype=torch.float16, low_cpu_mem_usage=True, use_flash_attention_2=True ).to(0) ``` ## LlavaNextConfig [[autodoc]] LlavaNextConfig ## LlavaNextImageProcessor [[autodoc]] LlavaNextImageProcessor - preprocess ## LlavaNextProcessor [[autodoc]] LlavaNextProcessor ## LlavaNextForConditionalGeneration [[autodoc]] LlavaNextForConditionalGeneration - forward
transformers/docs/source/en/model_doc/llava_next.md/0
{ "file_path": "transformers/docs/source/en/model_doc/llava_next.md", "repo_id": "transformers", "token_count": 2186 }
454
<!--Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MegatronBERT ## Overview The MegatronBERT model was proposed in [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro. The abstract from the paper is the following: *Recent work in language modeling demonstrates that training large transformer models advances the state of the art in Natural Language Processing applications. However, very large models can be quite difficult to train due to memory constraints. In this work, we present our techniques for training very large transformer models and implement a simple, efficient intra-layer model parallel approach that enables training transformer models with billions of parameters. Our approach does not require a new compiler or library changes, is orthogonal and complimentary to pipeline model parallelism, and can be fully implemented with the insertion of a few communication operations in native PyTorch. We illustrate this approach by converging transformer based models up to 8.3 billion parameters using 512 GPUs. We sustain 15.1 PetaFLOPs across the entire application with 76% scaling efficiency when compared to a strong single GPU baseline that sustains 39 TeraFLOPs, which is 30% of peak FLOPs. To demonstrate that large language models can further advance the state of the art (SOTA), we train an 8.3 billion parameter transformer language model similar to GPT-2 and a 3.9 billion parameter model similar to BERT. We show that careful attention to the placement of layer normalization in BERT-like models is critical to achieving increased performance as the model size grows. Using the GPT-2 model we achieve SOTA results on the WikiText103 (10.8 compared to SOTA perplexity of 15.8) and LAMBADA (66.5% compared to SOTA accuracy of 63.2%) datasets. Our BERT model achieves SOTA results on the RACE dataset (90.9% compared to SOTA accuracy of 89.4%).* This model was contributed by [jdemouth](https://huggingface.co/jdemouth). The original code can be found [here](https://github.com/NVIDIA/Megatron-LM). That repository contains a multi-GPU and multi-node implementation of the Megatron Language models. In particular, it contains a hybrid model parallel approach using "tensor parallel" and "pipeline parallel" techniques. ## Usage tips We have provided pretrained [BERT-345M](https://ngc.nvidia.com/catalog/models/nvidia:megatron_bert_345m) checkpoints for use to evaluate or finetuning downstream tasks. To access these checkpoints, first [sign up](https://ngc.nvidia.com/signup) for and setup the NVIDIA GPU Cloud (NGC) Registry CLI. 
Further documentation for downloading models can be found in the [NGC documentation](https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1). Alternatively, you can directly download the checkpoints using: BERT-345M-uncased: ```bash wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_uncased/zip -O megatron_bert_345m_v0_1_uncased.zip ``` BERT-345M-cased: ```bash wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_bert_345m/versions/v0.1_cased/zip -O megatron_bert_345m_v0_1_cased.zip ``` Once you have obtained the checkpoints from NVIDIA GPU Cloud (NGC), you have to convert them to a format that will easily be loaded by Hugging Face Transformers and our port of the BERT code. The following commands allow you to do the conversion. We assume that the folder `models/megatron_bert` contains `megatron_bert_345m_v0_1_{cased, uncased}.zip` and that the commands are run from inside that folder: ```bash python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_uncased.zip ``` ```bash python3 $PATH_TO_TRANSFORMERS/models/megatron_bert/convert_megatron_bert_checkpoint.py megatron_bert_345m_v0_1_cased.zip ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## MegatronBertConfig [[autodoc]] MegatronBertConfig ## MegatronBertModel [[autodoc]] MegatronBertModel - forward ## MegatronBertForMaskedLM [[autodoc]] MegatronBertForMaskedLM - forward ## MegatronBertForCausalLM [[autodoc]] MegatronBertForCausalLM - forward ## MegatronBertForNextSentencePrediction [[autodoc]] MegatronBertForNextSentencePrediction - forward ## MegatronBertForPreTraining [[autodoc]] MegatronBertForPreTraining - forward ## MegatronBertForSequenceClassification [[autodoc]] MegatronBertForSequenceClassification - forward ## MegatronBertForMultipleChoice [[autodoc]] MegatronBertForMultipleChoice - forward ## MegatronBertForTokenClassification [[autodoc]] MegatronBertForTokenClassification - forward ## MegatronBertForQuestionAnswering [[autodoc]] MegatronBertForQuestionAnswering - forward
transformers/docs/source/en/model_doc/megatron-bert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/megatron-bert.md", "repo_id": "transformers", "token_count": 1735 }
455
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MusicGen ## Overview The MusicGen model was proposed in the paper [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi and Alexandre Défossez. MusicGen is a single stage auto-regressive Transformer model capable of generating high-quality music samples conditioned on text descriptions or audio prompts. The text descriptions are passed through a frozen text encoder model to obtain a sequence of hidden-state representations. MusicGen is then trained to predict discrete audio tokens, or *audio codes*, conditioned on these hidden-states. These audio tokens are then decoded using an audio compression model, such as EnCodec, to recover the audio waveform. Through an efficient token interleaving pattern, MusicGen does not require a self-supervised semantic representation of the text/audio prompts, thus eliminating the need to cascade multiple models to predict a set of codebooks (e.g. hierarchically or upsampling). Instead, it is able to generate all the codebooks in a single forward pass. The abstract from the paper is the following: *We tackle the task of conditional music generation. We introduce MusicGen, a single Language Model (LM) that operates over several streams of compressed discrete music representation, i.e., tokens. Unlike prior work, MusicGen is comprised of a single-stage transformer LM together with efficient token interleaving patterns, which eliminates the need for cascading several models, e.g., hierarchically or upsampling. Following this approach, we demonstrate how MusicGen can generate high-quality samples, while being conditioned on textual description or melodic features, allowing better controls over the generated output. We conduct extensive empirical evaluation, considering both automatic and human studies, showing the proposed approach is superior to the evaluated baselines on a standard text-to-music benchmark. Through ablation studies, we shed light over the importance of each of the components comprising MusicGen.* This model was contributed by [sanchit-gandhi](https://huggingface.co/sanchit-gandhi). The original code can be found [here](https://github.com/facebookresearch/audiocraft). The pre-trained checkpoints can be found on the [Hugging Face Hub](https://huggingface.co/models?sort=downloads&search=facebook%2Fmusicgen-). 
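For a quick end-to-end test, the high-level `pipeline` API can also be used. This is only a sketch, not an official recipe: it assumes a Transformers version that includes the `text-to-audio` pipeline task, and uses the `facebook/musicgen-small` checkpoint together with `scipy` for writing the waveform to disk.

```python
import scipy
from transformers import pipeline

# the text-to-audio pipeline wraps the processor, the MusicGen model and audio decoding in one object
synthesiser = pipeline("text-to-audio", model="facebook/musicgen-small")

music = synthesiser(
    "lo-fi music with a soothing melody",
    forward_params={"do_sample": True, "max_new_tokens": 256},
)

# the pipeline returns the generated waveform and its sampling rate
scipy.io.wavfile.write("musicgen_out.wav", rate=music["sampling_rate"], data=music["audio"])
```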
## Usage tips - After downloading the original checkpoints from [here](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md#importing--exporting-models) , you can convert them using the **conversion script** available at `src/transformers/models/musicgen/convert_musicgen_transformers.py` with the following command: ```bash python src/transformers/models/musicgen/convert_musicgen_transformers.py \ --checkpoint small --pytorch_dump_folder /output/path --safe_serialization ``` ## Generation MusicGen is compatible with two generation modes: greedy and sampling. In practice, sampling leads to significantly better results than greedy, thus we encourage sampling mode to be used where possible. Sampling is enabled by default, and can be explicitly specified by setting `do_sample=True` in the call to [`MusicgenForConditionalGeneration.generate`], or by overriding the model's generation config (see below). Generation is limited by the sinusoidal positional embeddings to 30 second inputs. Meaning, MusicGen cannot generate more than 30 seconds of audio (1503 tokens), and input audio passed by Audio-Prompted Generation contributes to this limit so, given an input of 20 seconds of audio, MusicGen cannot generate more than 10 seconds of additional audio. Transformers supports both mono (1-channel) and stereo (2-channel) variants of MusicGen. The mono channel versions generate a single set of codebooks. The stereo versions generate 2 sets of codebooks, 1 for each channel (left/right), and each set of codebooks is decoded independently through the audio compression model. The audio streams for each channel are combined to give the final stereo output. ### Unconditional Generation The inputs for unconditional (or 'null') generation can be obtained through the method [`MusicgenForConditionalGeneration.get_unconditional_inputs`]: ```python >>> from transformers import MusicgenForConditionalGeneration >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1) >>> audio_values = model.generate(**unconditional_inputs, do_sample=True, max_new_tokens=256) ``` The audio outputs are a three-dimensional Torch tensor of shape `(batch_size, num_channels, sequence_length)`. To listen to the generated audio samples, you can either play them in an ipynb notebook: ```python from IPython.display import Audio sampling_rate = model.config.audio_encoder.sampling_rate Audio(audio_values[0].numpy(), rate=sampling_rate) ``` Or save them as a `.wav` file using a third-party library, e.g. `scipy`: ```python >>> import scipy >>> sampling_rate = model.config.audio_encoder.sampling_rate >>> scipy.io.wavfile.write("musicgen_out.wav", rate=sampling_rate, data=audio_values[0, 0].numpy()) ``` ### Text-Conditional Generation The model can generate an audio sample conditioned on a text prompt through use of the [`MusicgenProcessor`] to pre-process the inputs: ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> inputs = processor( ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... 
) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` The `guidance_scale` is used in classifier free guidance (CFG), setting the weighting between the conditional logits (which are predicted from the text prompts) and the unconditional logits (which are predicted from an unconditional or 'null' prompt). Higher guidance scale encourages the model to generate samples that are more closely linked to the input prompt, usually at the expense of poorer audio quality. CFG is enabled by setting `guidance_scale > 1`. For best results, use `guidance_scale=3` (default). ### Audio-Prompted Generation The same [`MusicgenProcessor`] can be used to pre-process an audio prompt that is used for audio continuation. In the following example, we load an audio file using the 🤗 Datasets library, which can be pip installed through the command below: ```bash pip install --upgrade pip pip install datasets[audio] ``` ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> dataset = load_dataset("sanchit-gandhi/gtzan", split="train", streaming=True) >>> sample = next(iter(dataset))["audio"] >>> # take the first half of the audio sample >>> sample["array"] = sample["array"][: len(sample["array"]) // 2] >>> inputs = processor( ... audio=sample["array"], ... sampling_rate=sample["sampling_rate"], ... text=["80s blues track with groovy saxophone"], ... padding=True, ... return_tensors="pt", ... ) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) ``` For batched audio-prompted generation, the generated `audio_values` can be post-processed to remove padding by using the [`MusicgenProcessor`] class: ```python >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration >>> from datasets import load_dataset >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> dataset = load_dataset("sanchit-gandhi/gtzan", split="train", streaming=True) >>> sample = next(iter(dataset))["audio"] >>> # take the first quarter of the audio sample >>> sample_1 = sample["array"][: len(sample["array"]) // 4] >>> # take the first half of the audio sample >>> sample_2 = sample["array"][: len(sample["array"]) // 2] >>> inputs = processor( ... audio=[sample_1, sample_2], ... sampling_rate=sample["sampling_rate"], ... text=["80s blues track with groovy saxophone", "90s rock song with loud guitars and heavy drums"], ... padding=True, ... return_tensors="pt", ... 
) >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) >>> # post-process to remove padding from the batched audio >>> audio_values = processor.batch_decode(audio_values, padding_mask=inputs.padding_mask) ``` ### Generation Configuration The default parameters that control the generation process, such as sampling, guidance scale and number of generated tokens, can be found in the model's generation config, and updated as desired: ```python >>> from transformers import MusicgenForConditionalGeneration >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") >>> # inspect the default generation config >>> model.generation_config >>> # increase the guidance scale to 4.0 >>> model.generation_config.guidance_scale = 4.0 >>> # decrease the max length to 256 tokens >>> model.generation_config.max_length = 256 ``` Note that any arguments passed to the generate method will **supersede** those in the generation config, so setting `do_sample=False` in the call to generate will supersede the setting of `model.generation_config.do_sample` in the generation config. ## Model Structure The MusicGen model can be de-composed into three distinct stages: 1. Text encoder: maps the text inputs to a sequence of hidden-state representations. The pre-trained MusicGen models use a frozen text encoder from either T5 or Flan-T5 2. MusicGen decoder: a language model (LM) that auto-regressively generates audio tokens (or codes) conditional on the encoder hidden-state representations 3. Audio encoder/decoder: used to encode an audio prompt to use as prompt tokens, and recover the audio waveform from the audio tokens predicted by the decoder Thus, the MusicGen model can either be used as a standalone decoder model, corresponding to the class [`MusicgenForCausalLM`], or as a composite model that includes the text encoder and audio encoder/decoder, corresponding to the class [`MusicgenForConditionalGeneration`]. If only the decoder needs to be loaded from the pre-trained checkpoint, it can be loaded by first specifying the correct config, or be accessed through the `.decoder` attribute of the composite model: ```python >>> from transformers import AutoConfig, MusicgenForCausalLM, MusicgenForConditionalGeneration >>> # Option 1: get decoder config and pass to `.from_pretrained` >>> decoder_config = AutoConfig.from_pretrained("facebook/musicgen-small").decoder >>> decoder = MusicgenForCausalLM.from_pretrained("facebook/musicgen-small", **decoder_config) >>> # Option 2: load the entire composite model, but only return the decoder >>> decoder = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").decoder ``` Since the text encoder and audio encoder/decoder models are frozen during training, the MusicGen decoder [`MusicgenForCausalLM`] can be trained standalone on a dataset of encoder hidden-states and audio codes. For inference, the trained decoder can be combined with the frozen text encoder and audio encoder/decoders to recover the composite [`MusicgenForConditionalGeneration`] model. Tips: * MusicGen is trained on the 32kHz checkpoint of Encodec. You should ensure you use a compatible version of the Encodec model. 
* Sampling mode tends to deliver better results than greedy - you can toggle sampling with the variable `do_sample` in the call to [`MusicgenForConditionalGeneration.generate`] ## MusicgenDecoderConfig [[autodoc]] MusicgenDecoderConfig ## MusicgenConfig [[autodoc]] MusicgenConfig ## MusicgenProcessor [[autodoc]] MusicgenProcessor ## MusicgenModel [[autodoc]] MusicgenModel - forward ## MusicgenForCausalLM [[autodoc]] MusicgenForCausalLM - forward ## MusicgenForConditionalGeneration [[autodoc]] MusicgenForConditionalGeneration - forward
transformers/docs/source/en/model_doc/musicgen.md/0
{ "file_path": "transformers/docs/source/en/model_doc/musicgen.md", "repo_id": "transformers", "token_count": 3592 }
456
<!--Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# QDQBERT

## Overview

The QDQBERT model can be referenced in [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.

The abstract from the paper is the following:

*Quantization techniques can reduce the size of Deep Neural Networks and improve inference latency and throughput by taking advantage of high throughput integer instructions. In this paper we review the mathematical aspects of quantization parameters and evaluate their choices on a wide range of neural network models for different application domains, including vision, speech, and language. We focus on quantization techniques that are amenable to acceleration by processors with high-throughput integer math pipelines. We also present a workflow for 8-bit quantization that is able to maintain accuracy within 1% of the floating-point baseline on all networks studied, including models that are more difficult to quantize, such as MobileNets and BERT-large.*

This model was contributed by [shangz](https://huggingface.co/shangz).

## Usage tips

- The QDQBERT model adds fake quantization operations (pairs of QuantizeLinear/DequantizeLinear ops) to (i) linear layer inputs and weights, (ii) matmul inputs, and (iii) residual add inputs in the BERT model.
- QDQBERT requires the [Pytorch Quantization Toolkit](https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization). To install it: `pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`
- The QDQBERT model can be loaded from any checkpoint of a HuggingFace BERT model (for example *google-bert/bert-base-uncased*) and used to perform Quantization Aware Training or Post Training Quantization.
- A complete example of using the QDQBERT model to perform Quantization Aware Training and Post Training Quantization for the SQuAD task can be found at [transformers/examples/research_projects/quantization-qdqbert/](examples/research_projects/quantization-qdqbert/).

### Set default quantizers

The QDQBERT model adds fake quantization operations (pairs of QuantizeLinear/DequantizeLinear ops) to BERT via `TensorQuantizer` in the [Pytorch Quantization Toolkit](https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization). `TensorQuantizer` is the module for quantizing tensors, with `QuantDescriptor` defining how the tensor should be quantized. Refer to the [Pytorch Quantization Toolkit userguide](https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/userguide.html) for more details.

Before creating the QDQBERT model, one has to set the default `QuantDescriptor` defining the default tensor quantizers.
Example: ```python >>> import pytorch_quantization.nn as quant_nn >>> from pytorch_quantization.tensor_quant import QuantDescriptor >>> # The default tensor quantizer is set to use Max calibration method >>> input_desc = QuantDescriptor(num_bits=8, calib_method="max") >>> # The default tensor quantizer is set to be per-channel quantization for weights >>> weight_desc = QuantDescriptor(num_bits=8, axis=((0,))) >>> quant_nn.QuantLinear.set_default_quant_desc_input(input_desc) >>> quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc) ``` ### Calibration Calibration is the terminology of passing data samples to the quantizer and deciding the best scaling factors for tensors. After setting up the tensor quantizers, one can use the following example to calibrate the model: ```python >>> # Find the TensorQuantizer and enable calibration >>> for name, module in model.named_modules(): ... if name.endswith("_input_quantizer"): ... module.enable_calib() ... module.disable_quant() # Use full precision data to calibrate >>> # Feeding data samples >>> model(x) >>> # ... >>> # Finalize calibration >>> for name, module in model.named_modules(): ... if name.endswith("_input_quantizer"): ... module.load_calib_amax() ... module.enable_quant() >>> # If running on GPU, it needs to call .cuda() again because new tensors will be created by calibration process >>> model.cuda() >>> # Keep running the quantized model >>> # ... ``` ### Export to ONNX The goal of exporting to ONNX is to deploy inference by [TensorRT](https://developer.nvidia.com/tensorrt). Fake quantization will be broken into a pair of QuantizeLinear/DequantizeLinear ONNX ops. After setting static member of TensorQuantizer to use Pytorch’s own fake quantization functions, fake quantized model can be exported to ONNX, follow the instructions in [torch.onnx](https://pytorch.org/docs/stable/onnx.html). Example: ```python >>> from pytorch_quantization.nn import TensorQuantizer >>> TensorQuantizer.use_fb_fake_quant = True >>> # Load the calibrated model >>> ... >>> # ONNX export >>> torch.onnx.export(...) ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## QDQBertConfig [[autodoc]] QDQBertConfig ## QDQBertModel [[autodoc]] QDQBertModel - forward ## QDQBertLMHeadModel [[autodoc]] QDQBertLMHeadModel - forward ## QDQBertForMaskedLM [[autodoc]] QDQBertForMaskedLM - forward ## QDQBertForSequenceClassification [[autodoc]] QDQBertForSequenceClassification - forward ## QDQBertForNextSentencePrediction [[autodoc]] QDQBertForNextSentencePrediction - forward ## QDQBertForMultipleChoice [[autodoc]] QDQBertForMultipleChoice - forward ## QDQBertForTokenClassification [[autodoc]] QDQBertForTokenClassification - forward ## QDQBertForQuestionAnswering [[autodoc]] QDQBertForQuestionAnswering - forward
transformers/docs/source/en/model_doc/qdqbert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/qdqbert.md", "repo_id": "transformers", "token_count": 1982 }
457
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SAM ## Overview SAM (Segment Anything Model) was proposed in [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. The model can be used to predict segmentation masks of any object of interest given an input image. ![example image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/sam-output.png) The abstract from the paper is the following: *We introduce the Segment Anything (SA) project: a new task, model, and dataset for image segmentation. Using our efficient model in a data collection loop, we built the largest segmentation dataset to date (by far), with over 1 billion masks on 11M licensed and privacy respecting images. The model is designed and trained to be promptable, so it can transfer zero-shot to new image distributions and tasks. We evaluate its capabilities on numerous tasks and find that its zero-shot performance is impressive -- often competitive with or even superior to prior fully supervised results. We are releasing the Segment Anything Model (SAM) and corresponding dataset (SA-1B) of 1B masks and 11M images at [https://segment-anything.com](https://segment-anything.com) to foster research into foundation models for computer vision.* Tips: - The model predicts binary masks that states the presence or not of the object of interest given an image. - The model predicts much better results if input 2D points and/or input bounding boxes are provided - You can prompt multiple points for the same image, and predict a single mask. - Fine-tuning the model is not supported yet - According to the paper, textual input should be also supported. However, at this time of writing this seems to be not supported according to [the official repository](https://github.com/facebookresearch/segment-anything/issues/4#issuecomment-1497626844). This model was contributed by [ybelkada](https://huggingface.co/ybelkada) and [ArthurZ](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/facebookresearch/segment-anything). 
Below is an example of how to run mask generation given an image and a 2D point:

```python
import torch
from PIL import Image
import requests
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-huge").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]]  # 2D location of a window in the image

inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
scores = outputs.iou_scores
```

You can also process your own masks alongside the input images in the processor to be passed to the model.

```python
import torch
from PIL import Image
import requests
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-huge").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
mask_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
segmentation_map = Image.open(requests.get(mask_url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]]  # 2D location of a window in the image

inputs = processor(raw_image, input_points=input_points, segmentation_maps=segmentation_map, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
scores = outputs.iou_scores
```

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with SAM.

- [Demo notebook](https://github.com/huggingface/notebooks/blob/main/examples/segment_anything.ipynb) for using the model.
- [Demo notebook](https://github.com/huggingface/notebooks/blob/main/examples/automatic_mask_generation.ipynb) for using the automatic mask generation pipeline.
- [Demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Run_inference_with_MedSAM_using_HuggingFace_Transformers.ipynb) for inference with MedSAM, a fine-tuned version of SAM on the medical domain. 🌎
- [Demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SAM/Fine_tune_SAM_(segment_anything)_on_a_custom_dataset.ipynb) for fine-tuning the model on custom data. 🌎

## SlimSAM

SlimSAM, a pruned version of SAM, was proposed in [0.1% Data Makes Segment Anything Slim](https://arxiv.org/abs/2312.05284) by Zigeng Chen et al. SlimSAM reduces the size of the SAM models considerably while maintaining the same performance.

Checkpoints can be found on the [hub](https://huggingface.co/models?other=slimsam), and they can be used as a drop-in replacement of SAM.

## Grounded SAM

One can combine [Grounding DINO](grounding-dino) with SAM for text-based mask generation as introduced in [Grounded SAM: Assembling Open-World Models for Diverse Visual Tasks](https://arxiv.org/abs/2401.14159).
You can refer to this [demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Grounding%20DINO/GroundingDINO_with_Segment_Anything.ipynb) 🌍 for details. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/grounded_sam.png" alt="drawing" width="900"/> <small> Grounded SAM overview. Taken from the <a href="https://github.com/IDEA-Research/Grounded-Segment-Anything">original repository</a>. </small> ## SamConfig [[autodoc]] SamConfig ## SamVisionConfig [[autodoc]] SamVisionConfig ## SamMaskDecoderConfig [[autodoc]] SamMaskDecoderConfig ## SamPromptEncoderConfig [[autodoc]] SamPromptEncoderConfig ## SamProcessor [[autodoc]] SamProcessor ## SamImageProcessor [[autodoc]] SamImageProcessor ## SamModel [[autodoc]] SamModel - forward ## TFSamModel [[autodoc]] TFSamModel - call
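Since the tips above mention that bounding boxes can be used as prompts as well as 2D points, here is a hedged variant of the earlier point-based example that prompts the model with a box instead. The box coordinates are illustrative assumptions only; they follow the `(x_min, y_min, x_max, y_max)` pixel convention.

```python
import torch
from PIL import Image
import requests
from transformers import SamModel, SamProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"
model = SamModel.from_pretrained("facebook/sam-vit-huge").to(device)
processor = SamProcessor.from_pretrained("facebook/sam-vit-huge")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

input_boxes = [[[100, 300, 1700, 900]]]  # one illustrative box per image, in pixel coordinates

inputs = processor(raw_image, input_boxes=input_boxes, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
scores = outputs.iou_scores
```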
transformers/docs/source/en/model_doc/sam.md/0
{ "file_path": "transformers/docs/source/en/model_doc/sam.md", "repo_id": "transformers", "token_count": 2253 }
458
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the MIT License; you may not use this file except in compliance with the License. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SuperPoint ## Overview The SuperPoint model was proposed in [SuperPoint: Self-Supervised Interest Point Detection and Description](https://arxiv.org/abs/1712.07629) by Daniel DeTone, Tomasz Malisiewicz and Andrew Rabinovich. This model is the result of a self-supervised training of a fully-convolutional network for interest point detection and description. The model is able to detect interest points that are repeatable under homographic transformations and provide a descriptor for each point. The use of the model in its own is limited, but it can be used as a feature extractor for other tasks such as homography estimation, image matching, etc. The abstract from the paper is the following: *This paper presents a self-supervised framework for training interest point detectors and descriptors suitable for a large number of multiple-view geometry problems in computer vision. As opposed to patch-based neural networks, our fully-convolutional model operates on full-sized images and jointly computes pixel-level interest point locations and associated descriptors in one forward pass. We introduce Homographic Adaptation, a multi-scale, multi-homography approach for boosting interest point detection repeatability and performing cross-domain adaptation (e.g., synthetic-to-real). Our model, when trained on the MS-COCO generic image dataset using Homographic Adaptation, is able to repeatedly detect a much richer set of interest points than the initial pre-adapted deep model and any other traditional corner detector. The final system gives rise to state-of-the-art homography estimation results on HPatches when compared to LIFT, SIFT and ORB.* ## How to use Here is a quick example of using the model to detect interest points in an image: ```python from transformers import AutoImageProcessor, AutoModel import torch from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") model = AutoModel.from_pretrained("magic-leap-community/superpoint") inputs = processor(image, return_tensors="pt") outputs = model(**inputs) ``` The outputs contain the list of keypoint coordinates with their respective score and description (a 256-long vector). You can also feed multiple images to the model. 
Due to the nature of SuperPoint, to output a dynamic number of keypoints, you will need to use the mask attribute to retrieve the respective information : ```python from transformers import AutoImageProcessor, AutoModel import torch from PIL import Image import requests url_image_1 = "http://images.cocodataset.org/val2017/000000039769.jpg" image_1 = Image.open(requests.get(url_image_1, stream=True).raw) url_image_2 = "http://images.cocodataset.org/test-stuff2017/000000000568.jpg" image_2 = Image.open(requests.get(url_image_2, stream=True).raw) images = [image_1, image_2] processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint") model = AutoModel.from_pretrained("magic-leap-community/superpoint") inputs = processor(images, return_tensors="pt") outputs = model(**inputs) for i in range(len(images)): image_mask = outputs.mask[i] image_indices = torch.nonzero(image_mask).squeeze() image_keypoints = outputs.keypoints[i][image_indices] image_scores = outputs.scores[i][image_indices] image_descriptors = outputs.descriptors[i][image_indices] ``` You can then print the keypoints on the image to visualize the result : ```python import cv2 for keypoint, score in zip(image_keypoints, image_scores): keypoint_x, keypoint_y = int(keypoint[0].item()), int(keypoint[1].item()) color = tuple([score.item() * 255] * 3) image = cv2.circle(image, (keypoint_x, keypoint_y), 2, color) cv2.imwrite("output_image.png", image) ``` This model was contributed by [stevenbucaille](https://huggingface.co/stevenbucaille). The original code can be found [here](https://github.com/magicleap/SuperPointPretrainedNetwork). ## SuperPointConfig [[autodoc]] SuperPointConfig ## SuperPointImageProcessor [[autodoc]] SuperPointImageProcessor - preprocess ## SuperPointForKeypointDetection [[autodoc]] SuperPointForKeypointDetection - forward
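As a small, hedged follow-up to the batched example above, the snippet below keeps only the most confident keypoints per image; the `0.01` threshold is an arbitrary assumption and should be tuned for your data.

```python
import torch

score_threshold = 0.01  # arbitrary value, tune for your use case

# `outputs` and `images` come from the batched example above
for i in range(len(images)):
    image_indices = torch.nonzero(outputs.mask[i]).squeeze()
    keypoints = outputs.keypoints[i][image_indices]
    scores = outputs.scores[i][image_indices]
    descriptors = outputs.descriptors[i][image_indices]

    # Keep only keypoints whose detection score clears the threshold
    keep = scores > score_threshold
    keypoints, scores, descriptors = keypoints[keep], scores[keep], descriptors[keep]
    print(f"image {i}: kept {keep.sum().item()} keypoints")
```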
transformers/docs/source/en/model_doc/superpoint.md/0
{ "file_path": "transformers/docs/source/en/model_doc/superpoint.md", "repo_id": "transformers", "token_count": 1384 }
459
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # TVLT ## Overview The TVLT model was proposed in [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal (the first three authors contributed equally). The Textless Vision-Language Transformer (TVLT) is a model that uses raw visual and audio inputs for vision-and-language representation learning, without using text-specific modules such as tokenization or automatic speech recognition (ASR). It can perform various audiovisual and vision-language tasks like retrieval, question answering, etc. The abstract from the paper is the following: *In this work, we present the Textless Vision-Language Transformer (TVLT), where homogeneous transformer blocks take raw visual and audio inputs for vision-and-language representation learning with minimal modality-specific design, and do not use text-specific modules such as tokenization or automatic speech recognition (ASR). TVLT is trained by reconstructing masked patches of continuous video frames and audio spectrograms (masked autoencoding) and contrastive modeling to align video and audio. TVLT attains performance comparable to its text-based counterpart on various multimodal tasks, such as visual question answering, image retrieval, video retrieval, and multimodal sentiment analysis, with 28x faster inference speed and only 1/3 of the parameters. Our findings suggest the possibility of learning compact and efficient visual-linguistic representations from low-level visual and audio signals without assuming the prior existence of text.* <p align="center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/tvlt_architecture.png" alt="drawing" width="600"/> </p> <small> TVLT architecture. Taken from the <a href="[https://arxiv.org/abs/2102.03334](https://arxiv.org/abs/2209.14156)">original paper</a>. </small> The original code can be found [here](https://github.com/zinengtang/TVLT). This model was contributed by [Zineng Tang](https://huggingface.co/ZinengTang). ## Usage tips - TVLT is a model that takes both `pixel_values` and `audio_values` as input. One can use [`TvltProcessor`] to prepare data for the model. This processor wraps an image processor (for the image/video modality) and an audio feature extractor (for the audio modality) into one. - TVLT is trained with images/videos and audios of various sizes: the authors resize and crop the input images/videos to 224 and limit the length of audio spectrogram to 2048. To make batching of videos and audios possible, the authors use a `pixel_mask` that indicates which pixels are real/padding and `audio_mask` that indicates which audio values are real/padding. 
- The design of TVLT is very similar to that of a standard Vision Transformer (ViT) and masked autoencoder (MAE) as in [ViTMAE](vitmae). The difference is that the model includes embedding layers for the audio modality. - The PyTorch version of this model is only available in torch 1.10 and higher. ## TvltConfig [[autodoc]] TvltConfig ## TvltProcessor [[autodoc]] TvltProcessor - __call__ ## TvltImageProcessor [[autodoc]] TvltImageProcessor - preprocess ## TvltFeatureExtractor [[autodoc]] TvltFeatureExtractor - __call__ ## TvltModel [[autodoc]] TvltModel - forward ## TvltForPreTraining [[autodoc]] TvltForPreTraining - forward ## TvltForAudioVisualClassification [[autodoc]] TvltForAudioVisualClassification - forward
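To make the processor/model interplay described in the tips more concrete, here is a rough, hedged sketch that runs random data through the model. The checkpoint name, the `images`/`audio` argument names and the input shapes are assumptions on our part rather than details stated on this page, so double-check them against the API reference above before relying on them.

```python
import numpy as np
import torch
from transformers import TvltProcessor, TvltModel

# Assumed checkpoint name
processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
model = TvltModel.from_pretrained("ZinengTang/tvlt-base")

# One video made of 8 random RGB frames and one short random audio clip (shapes are illustrative)
video_frames = list(np.random.rand(8, 3, 224, 224))
audio = np.random.rand(44_100).astype(np.float32)

inputs = processor(images=video_frames, audio=audio, sampling_rate=44_100, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
# `outputs` contains the joint hidden states of the video and audio streams
```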
transformers/docs/source/en/model_doc/tvlt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/tvlt.md", "repo_id": "transformers", "token_count": 1149 }
460
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Vision Transformer (ViT) ## Overview The Vision Transformer (ViT) model was proposed in [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. It's the first paper that successfully trains a Transformer encoder on ImageNet, attaining very good results compared to familiar convolutional architectures. The abstract from the paper is the following: *While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg" alt="drawing" width="600"/> <small> ViT architecture. Taken from the <a href="https://arxiv.org/abs/2010.11929">original paper.</a> </small> Following the original Vision Transformer, some follow-up works have been made: - [DeiT](deit) (Data-efficient Image Transformers) by Facebook AI. DeiT models are distilled vision transformers. The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into [`ViTModel`] or [`ViTForImageClassification`]. There are 4 variants available (in 3 different sizes): *facebook/deit-tiny-patch16-224*, *facebook/deit-small-patch16-224*, *facebook/deit-base-patch16-224* and *facebook/deit-base-patch16-384*. Note that one should use [`DeiTImageProcessor`] in order to prepare images for the model. - [BEiT](beit) (BERT pre-training of Image Transformers) by Microsoft Research. BEiT models outperform supervised pre-trained vision transformers using a self-supervised method inspired by BERT (masked image modeling) and based on a VQ-VAE. - DINO (a method for self-supervised training of Vision Transformers) by Facebook AI. 
Vision Transformers trained using the DINO method show very interesting properties not seen with convolutional models. They are capable of segmenting objects, without having ever been trained to do so. DINO checkpoints can be found on the [hub](https://huggingface.co/models?other=dino). - [MAE](vit_mae) (Masked Autoencoders) by Facebook AI. By pre-training Vision Transformers to reconstruct pixel values for a high portion (75%) of masked patches (using an asymmetric encoder-decoder architecture), the authors show that this simple method outperforms supervised pre-training after fine-tuning. This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code (written in JAX) can be found [here](https://github.com/google-research/vision_transformer). Note that we converted the weights from Ross Wightman's [timm library](https://github.com/rwightman/pytorch-image-models), who already converted the weights from JAX to PyTorch. Credits go to him! ## Usage tips - To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image, which can be used for classification. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder. - As the Vision Transformer expects each image to be of the same size (resolution), one can use [`ViTImageProcessor`] to resize (or rescale) and normalize images for the model. - Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `google/vit-base-patch16-224` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=vit). - The available checkpoints are either (1) pre-trained on [ImageNet-21k](http://www.image-net.org/) (a collection of 14 million images and 21k classes) only, or (2) also fine-tuned on [ImageNet](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). - The Vision Transformer was pre-trained using a resolution of 224x224. During fine-tuning, it is often beneficial to use a higher resolution than pre-training [(Touvron et al., 2019)](https://arxiv.org/abs/1906.06423), [(Kolesnikov et al., 2020)](https://arxiv.org/abs/1912.11370). In order to fine-tune at higher resolution, the authors perform 2D interpolation of the pre-trained position embeddings, according to their location in the original image. - The best results are obtained with supervised pre-training, which is not the case in NLP. The authors also performed an experiment with a self-supervised pre-training objective, namely masked patched prediction (inspired by masked language modeling). With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant improvement of 2% to training from scratch, but still 4% behind supervised pre-training. ## Resources Demo notebooks regarding inference as well as fine-tuning ViT on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer). A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT. 
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. `ViTForImageClassification` is supported by: <PipelineTag pipeline="image-classification"/> - A blog post on how to [Fine-Tune ViT for Image Classification with Hugging Face Transformers](https://huggingface.co/blog/fine-tune-vit) - A blog post on [Image Classification with Hugging Face Transformers and `Keras`](https://www.philschmid.de/image-classification-huggingface-transformers-keras) - A notebook on [Fine-tuning for Image Classification with Hugging Face Transformers](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) - A notebook on how to [Fine-tune the Vision Transformer on CIFAR-10 with the Hugging Face Trainer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) - A notebook on how to [Fine-tune the Vision Transformer on CIFAR-10 with PyTorch Lightning](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) ⚗️ Optimization - A blog post on how to [Accelerate Vision Transformer (ViT) with Quantization using Optimum](https://www.philschmid.de/optimizing-vision-transformer) ⚡️ Inference - A notebook on [Quick demo: Vision Transformer (ViT) by Google Brain](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Quick_demo_of_HuggingFace_version_of_Vision_Transformer_inference.ipynb) 🚀 Deploy - A blog post on [Deploying Tensorflow Vision Models in Hugging Face with TF Serving](https://huggingface.co/blog/tf-serving-vision) - A blog post on [Deploying Hugging Face ViT on Vertex AI](https://huggingface.co/blog/deploy-vertex-ai) - A blog post on [Deploying Hugging Face ViT on Kubernetes with TF Serving](https://huggingface.co/blog/deploy-tfserving-kubernetes) ## ViTConfig [[autodoc]] ViTConfig ## ViTFeatureExtractor [[autodoc]] ViTFeatureExtractor - __call__ ## ViTImageProcessor [[autodoc]] ViTImageProcessor - preprocess <frameworkcontent> <pt> ## ViTModel [[autodoc]] ViTModel - forward ## ViTForMaskedImageModeling [[autodoc]] ViTForMaskedImageModeling - forward ## ViTForImageClassification [[autodoc]] ViTForImageClassification - forward </pt> <tf> ## TFViTModel [[autodoc]] TFViTModel - call ## TFViTForImageClassification [[autodoc]] TFViTForImageClassification - call </tf> <jax> ## FlaxVitModel [[autodoc]] FlaxViTModel - __call__ ## FlaxViTForImageClassification [[autodoc]] FlaxViTForImageClassification - __call__ </jax> </frameworkcontent>
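To complement the resources above, here is a short, hedged inference sketch with `ViTForImageClassification`; the checkpoint and the test image URL are common illustrative choices rather than something prescribed by this page.

```python
import requests
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# The checkpoint is fine-tuned on ImageNet-1k, so id2label maps to the 1,000 ImageNet classes
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```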
transformers/docs/source/en/model_doc/vit.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vit.md", "repo_id": "transformers", "token_count": 2815 }
461
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# XLM-ProphetNet

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=xprophetnet">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-xprophetnet-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/xprophetnet-large-wiki100-cased-xglue-ntg">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

**DISCLAIMER:** If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title) and assign @patrickvonplaten

## Overview

The XLM-ProphetNet model was proposed in [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020.

XLM-ProphetNet is an encoder-decoder model and can predict n-future tokens for "ngram" language modeling instead of just the next token. Its architecture is identical to ProphetNet, but the model was trained on the multi-lingual "wiki100" Wikipedia dump. XLM-ProphetNet's model architecture and pretraining objective are the same as ProphetNet's, but XLM-ProphetNet was pre-trained on the cross-lingual dataset XGLUE.

The abstract from the paper is the following:

*In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.*

The Authors' code can be found [here](https://github.com/microsoft/ProphetNet).
## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## XLMProphetNetConfig [[autodoc]] XLMProphetNetConfig ## XLMProphetNetTokenizer [[autodoc]] XLMProphetNetTokenizer ## XLMProphetNetModel [[autodoc]] XLMProphetNetModel ## XLMProphetNetEncoder [[autodoc]] XLMProphetNetEncoder ## XLMProphetNetDecoder [[autodoc]] XLMProphetNetDecoder ## XLMProphetNetForConditionalGeneration [[autodoc]] XLMProphetNetForConditionalGeneration ## XLMProphetNetForCausalLM [[autodoc]] XLMProphetNetForCausalLM
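Since the Spaces badge above points at the XGLUE news-title-generation fine-tune, here is a hedged sketch of using that checkpoint for headline generation; the checkpoint name, the generation settings and the example article are assumptions chosen for illustration.

```python
from transformers import XLMProphetNetForConditionalGeneration, XLMProphetNetTokenizer

model_name = "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg"  # assumed checkpoint
tokenizer = XLMProphetNetTokenizer.from_pretrained(model_name)
model = XLMProphetNetForConditionalGeneration.from_pretrained(model_name)

article = (
    "Microsoft Corporation intends to officially end free support for the Windows 7 "
    "operating system after January 14, 2020, according to the official portal of the organization."
)
inputs = tokenizer(article, return_tensors="pt")

# Beam-search settings are illustrative, not tuned values
title_ids = model.generate(**inputs, num_beams=4, max_length=48, early_stopping=True)
print(tokenizer.batch_decode(title_ids, skip_special_tokens=True))
```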
transformers/docs/source/en/model_doc/xlm-prophetnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-prophetnet.md", "repo_id": "transformers", "token_count": 1127 }
462
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Using pipelines for a webserver <Tip> Creating an inference engine is a complex topic, and the "best" solution will most likely depend on your problem space. Are you on CPU or GPU? Do you want the lowest latency, the highest throughput, support for many models, or just highly optimize 1 specific model? There are many ways to tackle this topic, so what we are going to present is a good default to get started which may not necessarily be the most optimal solution for you. </Tip> The key thing to understand is that we can use an iterator, just like you would [on a dataset](pipeline_tutorial#using-pipelines-on-a-dataset), since a webserver is basically a system that waits for requests and treats them as they come in. Usually webservers are multiplexed (multithreaded, async, etc..) to handle various requests concurrently. Pipelines on the other hand (and mostly the underlying models) are not really great for parallelism; they take up a lot of RAM, so it's best to give them all the available resources when they are running or it's a compute-intensive job. We are going to solve that by having the webserver handle the light load of receiving and sending requests, and having a single thread handling the actual work. This example is going to use `starlette`. The actual framework is not really important, but you might have to tune or change the code if you are using another one to achieve the same effect. Create `server.py`: ```py from starlette.applications import Starlette from starlette.responses import JSONResponse from starlette.routing import Route from transformers import pipeline import asyncio async def homepage(request): payload = await request.body() string = payload.decode("utf-8") response_q = asyncio.Queue() await request.app.model_queue.put((string, response_q)) output = await response_q.get() return JSONResponse(output) async def server_loop(q): pipe = pipeline(model="google-bert/bert-base-uncased") while True: (string, response_q) = await q.get() out = pipe(string) await response_q.put(out) app = Starlette( routes=[ Route("/", homepage, methods=["POST"]), ], ) @app.on_event("startup") async def startup_event(): q = asyncio.Queue() app.model_queue = q asyncio.create_task(server_loop(q)) ``` Now you can start it with: ```bash uvicorn server:app ``` And you can query it: ```bash curl -X POST -d "test [MASK]" http://localhost:8000/ #[{"score":0.7742936015129089,"token":1012,"token_str":".","sequence":"test."},...] ``` And there you go, now you have a good idea of how to create a webserver! What is really important is that we load the model only **once**, so there are no copies of the model on the webserver. This way, no unnecessary RAM is being used. Then the queuing mechanism allows you to do fancy stuff like maybe accumulating a few items before inferring to use dynamic batching: <Tip warning={true}> The code sample below is intentionally written like pseudo-code for readability. Do not run this without checking if it makes sense for your system resources! 
</Tip> ```py (string, rq) = await q.get() strings = [] queues = [] while True: try: (string, rq) = await asyncio.wait_for(q.get(), timeout=0.001) # 1ms except asyncio.exceptions.TimeoutError: break strings.append(string) queues.append(rq) strings outs = pipe(strings, batch_size=len(strings)) for rq, out in zip(queues, outs): await rq.put(out) ``` Again, the proposed code is optimized for readability, not for being the best code. First of all, there's no batch size limit which is usually not a great idea. Next, the timeout is reset on every queue fetch, meaning you could wait much more than 1ms before running the inference (delaying the first request by that much). It would be better to have a single 1ms deadline. This will always wait for 1ms even if the queue is empty, which might not be the best since you probably want to start doing inference if there's nothing in the queue. But maybe it does make sense if batching is really crucial for your use case. Again, there's really no one best solution. ## Few things you might want to consider ### Error checking There's a lot that can go wrong in production: out of memory, out of space, loading the model might fail, the query might be wrong, the query might be correct but still fail to run because of a model misconfiguration, and so on. Generally, it's good if the server outputs the errors to the user, so adding a lot of `try..except` statements to show those errors is a good idea. But keep in mind it may also be a security risk to reveal all those errors depending on your security context. ### Circuit breaking Webservers usually look better when they do circuit breaking. It means they return proper errors when they're overloaded instead of just waiting for the query indefinitely. Return a 503 error instead of waiting for a super long time or a 504 after a long time. This is relatively easy to implement in the proposed code since there is a single queue. Looking at the queue size is a basic way to start returning errors before your webserver fails under load. ### Blocking the main thread Currently PyTorch is not async aware, and computation will block the main thread while running. That means it would be better if PyTorch was forced to run on its own thread/process. This wasn't done here because the code is a lot more complex (mostly because threads and async and queues don't play nice together). But ultimately it does the same thing. This would be important if the inference of single items were long (> 1s) because in this case, it means every query during inference would have to wait for 1s before even receiving an error. ### Dynamic batching In general, batching is not necessarily an improvement over passing 1 item at a time (see [batching details](./main_classes/pipelines#pipeline-batching) for more information). But it can be very effective when used in the correct setting. In the API, there is no dynamic batching by default (too much opportunity for a slowdown). But for BLOOM inference - which is a very large model - dynamic batching is **essential** to provide a decent experience for everyone.
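To make the circuit-breaking advice above concrete, here is a hedged sketch of how the `homepage` handler from the earlier `server.py` could refuse new work once the queue grows too large; the limit of 32 is an arbitrary assumption to tune for your hardware and latency budget.

```py
import asyncio

from starlette.responses import JSONResponse

QUEUE_LIMIT = 32  # arbitrary threshold


async def homepage(request):
    # Circuit breaking: fail fast with a 503 instead of letting latency grow unbounded.
    if request.app.model_queue.qsize() >= QUEUE_LIMIT:
        return JSONResponse({"error": "Server overloaded, please retry later"}, status_code=503)

    payload = await request.body()
    string = payload.decode("utf-8")
    response_q = asyncio.Queue()
    await request.app.model_queue.put((string, response_q))
    output = await response_q.get()
    return JSONResponse(output)
```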
transformers/docs/source/en/pipeline_webserver.md/0
{ "file_path": "transformers/docs/source/en/pipeline_webserver.md", "repo_id": "transformers", "token_count": 1775 }
463
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Image-to-Image Task Guide

[[open-in-colab]]

Image-to-Image task is the task where an application receives an image and outputs another image. This has various subtasks, including image enhancement (super resolution, low light enhancement, deraining and so on), image inpainting, and more.

This guide will show you how to:
- Use an image-to-image pipeline for super resolution task,
- Run image-to-image models for same task without a pipeline.

Note that as of the time this guide is released, the `image-to-image` pipeline only supports the super-resolution task.

Let's begin by installing the necessary libraries.

```bash
pip install transformers
```

We can now initialize the pipeline with a [Swin2SR model](https://huggingface.co/caidas/swin2SR-lightweight-x2-64). We can then infer with the pipeline by calling it with an image. As of now, only [Swin2SR models](https://huggingface.co/models?sort=trending&search=swin2sr) are supported in this pipeline.

```python
import torch
from transformers import pipeline

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pipe = pipeline(task="image-to-image", model="caidas/swin2SR-lightweight-x2-64", device=device)
```

Now, let's load an image.

```python
from PIL import Image
import requests

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg"
image = Image.open(requests.get(url, stream=True).raw)

print(image.size)
```
```bash
# (532, 432)
```
<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg" alt="Photo of a cat"/>
</div>

We can now do inference with the pipeline. We will get an upscaled version of the cat image.

```python
upscaled = pipe(image)
print(upscaled.size)
```
```bash
# (1072, 880)
```

If you wish to do inference yourself with no pipeline, you can use the `Swin2SRForImageSuperResolution` and `Swin2SRImageProcessor` classes of transformers. We will use the same model checkpoint for this. Let's initialize the model and the processor.

```python
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-lightweight-x2-64").to(device)
processor = Swin2SRImageProcessor.from_pretrained("caidas/swin2SR-lightweight-x2-64")
```

`pipeline` abstracts away the preprocessing and postprocessing steps that we have to do ourselves, so let's preprocess the image. We will pass the image to the processor and then move the pixel values to GPU.

```python
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)

pixel_values = pixel_values.to(device)
```

We can now infer the image by passing pixel values to the model.
```python import torch with torch.no_grad(): outputs = model(pixel_values) ``` Output is an object of type `ImageSuperResolutionOutput` that looks like below 👇 ``` (loss=None, reconstruction=tensor([[[[0.8270, 0.8269, 0.8275, ..., 0.7463, 0.7446, 0.7453], [0.8287, 0.8278, 0.8283, ..., 0.7451, 0.7448, 0.7457], [0.8280, 0.8273, 0.8269, ..., 0.7447, 0.7446, 0.7452], ..., [0.5923, 0.5933, 0.5924, ..., 0.0697, 0.0695, 0.0706], [0.5926, 0.5932, 0.5926, ..., 0.0673, 0.0687, 0.0705], [0.5927, 0.5914, 0.5922, ..., 0.0664, 0.0694, 0.0718]]]], device='cuda:0'), hidden_states=None, attentions=None) ``` We need to get the `reconstruction` and post-process it for visualization. Let's see how it looks like. ```python outputs.reconstruction.data.shape # torch.Size([1, 3, 880, 1072]) ``` We need to squeeze the output and get rid of axis 0, clip the values, then convert it to be numpy float. Then we will arrange axes to have the shape [1072, 880], and finally, bring the output back to range [0, 255]. ```python import numpy as np # squeeze, take to CPU and clip the values output = outputs.reconstruction.data.squeeze().cpu().clamp_(0, 1).numpy() # rearrange the axes output = np.moveaxis(output, source=0, destination=-1) # bring values back to pixel values range output = (output * 255.0).round().astype(np.uint8) Image.fromarray(output) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat_upscaled.png" alt="Upscaled photo of a cat"/> </div>
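Finally, a brief hedged note on the pipeline path: in our experience the `image-to-image` pipeline returns `PIL` images directly (one per input), so the upscaled result can be saved or batched without any of the manual post-processing above. Treat the exact return type as an assumption to verify for your version of the library.

```python
# `pipe` and `image` come from the pipeline example at the top of this guide
upscaled = pipe(image)
upscaled.save("cat_upscaled.png")

# A list of images is also accepted; each element is upscaled independently
results = pipe([image, image])
for i, result in enumerate(results):
    result.save(f"upscaled_{i}.png")
```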
transformers/docs/source/en/tasks/image_to_image.md/0
{ "file_path": "transformers/docs/source/en/tasks/image_to_image.md", "repo_id": "transformers", "token_count": 1725 }
464
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ¿Cómo puedo crear un pipeline personalizado? En esta guía, veremos cómo crear un pipeline personalizado y cómo compartirlo en el [Hub](https://hf.co/models) o añadirlo a la biblioteca 🤗 Transformers. En primer lugar, debes decidir las entradas que tu pipeline podrá recibir. Pueden ser strings, bytes, diccionarios o lo que te parezca que vaya a ser la entrada más apropiada. Intenta mantener estas entradas en un formato que sea tan Python puro como sea posible, puesto que esto facilita la compatibilidad (incluso con otros lenguajes de programación por medio de JSON). Estos serán los `inputs` (entradas) del pipeline (`preprocess`). Ahora debes definir los `outputs` (salidas). Al igual que con los `inputs`, entre más simple el formato, mejor. Estas serán las salidas del método `postprocess` (posprocesamiento). Empieza heredando la clase base `Pipeline` con los 4 métodos que debemos implementar: `preprocess` (preprocesamiento), `_forward` (ejecución), `postprocess` (posprocesamiento) y `_sanitize_parameters` (verificar parámetros). ```python from transformers import Pipeline class MyPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, inputs, maybe_arg=2): model_input = Tensor(inputs["input_ids"]) return {"model_input": model_input} def _forward(self, model_inputs): # model_inputs == {"model_input": model_input} outputs = self.model(**model_inputs) # Quizá {"logits": Tensor(...)} return outputs def postprocess(self, model_outputs): best_class = model_outputs["logits"].softmax(-1) return best_class ``` La estructura de este desglose es así para garantizar una compatibilidad más o menos transparente con el uso de CPU/GPU y el pre/posprocesamiento en CPU en varios hilos. `preprocess` tomará las entradas definidas originalmente y las convertirá en algo que se le pueda pasar al modelo. Podría contener más información y a menudo es un objeto `Dict` (diccionario). `_forward` contiene los detalles de la implementación y no debería ser invocado de forma directa. `forward` es el método preferido a utilizar pues contiene verificaciones para asegurar que todo funcione en el dispositivo correcto. Cualquier cosa que esté relacionada con un modelo real debería ir en el método `_forward`, todo lo demás va en los métodos de preprocesamiento y posprocesamiento. Los métodos `postprocess` reciben la salida `_forward` y la convierten en la salida final que decidimos anteriormente. `_sanitize_parameters` existe para permitir a los usuarios pasar cualesquiera parámetros cuando lo deseen, ya sea al momento de inicializar el pipeline `pipeline(...., maybe_arg=4)` o al momento de invocarlo `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`. 
El método `_sanitize_parameters` devuelve 3 diccionarios de kwargs que serán pasados directamente a `preprocess`, `_forward` y `postprocess`. No ingreses nada si el caller no se va a invocar con parámetros adicionales. Esto permite mantener los parámetros por defecto de la definición de la función, lo que es más "natural". Un ejemplo clásico sería un argumento `top_k` en el posprocesamiento de una tarea de clasificación. ```python >>> pipe = pipeline("my-new-task") >>> pipe("This is a test") [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05} {"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}] >>> pipe("This is a test", top_k=2) [{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}] ``` Para lograrlo, actualizaremos nuestro método `postprocess` con un valor por defecto de `5` y modificaremos `_sanitize_parameters` para permitir este nuevo parámetro. ```python def postprocess(self, model_outputs, top_k=5): best_class = model_outputs["logits"].softmax(-1) # Añade la lógica para manejar el top_k return best_class def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] postprocess_kwargs = {} if "top_k" in kwargs: postprocess_kwargs["top_k"] = kwargs["top_k"] return preprocess_kwargs, {}, postprocess_kwargs ``` Intenta que las entradas y salidas sean muy simples e, idealmente, que puedan serializarse como JSON, pues esto hace el uso del pipeline muy sencillo sin que el usuario tenga que preocuparse por conocer nuevos tipos de objetos. También es relativamente común tener compatibilidad con muchos tipos diferentes de argumentos por facilidad de uso (por ejemplo, los archivos de audio pueden ser nombres de archivo, URLs o bytes). ## Añadirlo a la lista de tareas Para registrar tu `new-task` (nueva tarea) en la lista de tareas, debes añadirla al `PIPELINE_REGISTRY` (registro de pipelines): ```python from transformers.pipelines import PIPELINE_REGISTRY PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, ) ``` Puedes especificar un modelo por defecto si lo deseas, en cuyo caso debe venir con una versión específica (que puede ser el nombre de un branch o hash de commit, en este caso usamos `"abcdef"`), así como el tipo: ```python PIPELINE_REGISTRY.register_pipeline( "new-task", pipeline_class=MyPipeline, pt_model=AutoModelForSequenceClassification, default={"pt": ("user/awesome_model", "abcdef")}, type="text", # tipo de datos que maneja: texto, audio, imagen, multi-modalidad ) ``` ## Comparte tu pipeline en el Hub Para compartir tu pipeline personalizado en el Hub, solo tienes que guardar el código personalizado de tu sub-clase `Pipeline` en un archivo de Python. 
Por ejemplo, digamos que queremos usar un pipeline personalizado para la clasificación de duplas de oraciones de esta forma: ```py import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits} ``` La implementación es independiente del framework y funcionará con modelos de PyTorch y TensorFlow. Si guardamos esto en un archivo llamado `pair_classification.py`, podemos importarlo y registrarlo de la siguiente manera: ```py from pair_classification import PairClassificationPipeline from transformers.pipelines import PIPELINE_REGISTRY from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, tf_model=TFAutoModelForSequenceClassification, ) ``` Una vez hecho esto, podemos usarlo con un modelo pre-entrenado. Por ejemplo, al modelo `sgugger/finetuned-bert-mrpc` se le hizo fine-tuning con el dataset MRPC, en el cual se clasifican duplas de oraciones como paráfrasis o no. ```py from transformers import pipeline classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc") ``` Ahora podemos compartirlo en el Hub usando el método `save_pretrained`: ```py classifier.push_to_hub("test-dynamic-pipeline") ``` Esto copiará el archivo donde definiste `PairClassificationPipeline` dentro de la carpeta `"test-dynamic-pipeline"`, y además guardará el modelo y el tokenizer del pipeline, antes de enviar todo al repositorio `{your_username}/test-dynamic-pipeline`. Después de esto, cualquier persona puede usarlo siempre que usen la opción `trust_remote_code=True` (confiar en código remoto): ```py from transformers import pipeline classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True) ``` ## Añadir el pipeline a 🤗 Transformers Si quieres contribuir tu pipeline a la biblioteca 🤗 Transformers, tendrás que añadirlo a un nuevo módulo en el sub-módulo `pipelines` con el código de tu pipeline. Luego, debes añadirlo a la lista de tareas definidas en `pipelines/__init__.py`. A continuación tienes que añadir las pruebas. Crea un nuevo archivo llamado `tests/test_pipelines_MY_PIPELINE.py` basándote en las pruebas existentes. La función `run_pipeline_test` será muy genérica y se correrá sobre modelos pequeños escogidos al azar sobre todas las arquitecturas posibles definidas en `model_mapping` y `tf_model_mapping`. 
Esto es muy importante para probar compatibilidades a futuro, lo que significa que si alguien añade un nuevo modelo para `XXXForQuestionAnswering` entonces el pipeline intentará ejecutarse con ese modelo. Ya que los modelos son aleatorios, es imposible verificar los valores como tales, y es por eso que hay un helper `ANY` que simplemente intentará que la salida tenga el mismo tipo que la salida esperada del pipeline. También *debes* implementar 2 (preferiblemente 4) pruebas: - `test_small_model_pt` : Define un (1) modelo pequeño para este pipeline (no importa si los resultados no tienen sentido) y prueba las salidas del pipeline. Los resultados deberían ser los mismos que en `test_small_model_tf`. - `test_small_model_tf` : Define un (1) modelo pequeño para este pipeline (no importa si los resultados no tienen sentido) y prueba las salidas del pipeline. Los resultados deberían ser los mismos que en `test_small_model_pt`. - `test_large_model_pt` (`optional`): Prueba el pipeline en una tarea real en la que los resultados deben tener sentido. Estas pruebas son lentas y deben marcarse como tales. El objetivo de esto es ejemplificar el pipeline y asegurarse de que no haya divergencias en versiones futuras. - `test_large_model_tf` (`optional`): Prueba el pipeline en una tarea real en la que los resultados deben tener sentido. Estas pruebas son lentas y deben marcarse como tales. El objetivo de esto es ejemplificar el pipeline y asegurarse de que no haya divergencias en versiones futuras.
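Como complemento, aquí tienes un pequeño ejemplo hipotético de cómo invocar el pipeline `pair-classification` registrado arriba (suponiendo que ya ejecutaste el código de registro con `PIPELINE_REGISTRY`); las oraciones de entrada y la salida mostrada son solo ilustrativas.

```py
from transformers import pipeline

classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")

# `_sanitize_parameters` acepta `second_text`, así que la segunda oración
# se pasa como argumento nombrado.
resultado = classifier(
    "The company HuggingFace is based in New York City",
    second_text="HuggingFace's headquarters are situated in Manhattan",
)
print(resultado)  # p. ej. {"label": "equivalent", "score": 0.98, "logits": [...]}
```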
transformers/docs/source/es/add_new_pipeline.md/0
{ "file_path": "transformers/docs/source/es/add_new_pipeline.md", "repo_id": "transformers", "token_count": 4249 }
465
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Modelado de lenguaje El modelado de lenguaje predice palabras en un enunciado. Hay dos formas de modelado de lenguaje. <Youtube id="Vpjb1lu0MDk"/> El modelado de lenguaje causal predice el siguiente token en una secuencia de tokens, y el modelo solo puede considerar los tokens a la izquierda. <Youtube id="mqElG5QJWUg"/> El modelado de lenguaje por enmascaramiento predice un token enmascarado en una secuencia, y el modelo puede considerar los tokens bidireccionalmente. Esta guía te mostrará cómo realizar fine-tuning [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) para modelos de lenguaje causales y [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) para modelos de lenguaje por enmascaramiento en el [r/askscience](https://www.reddit.com/r/askscience/) subdataset [ELI5](https://huggingface.co/datasets/eli5). <Tip> Mira la [página de tarea](https://huggingface.co/tasks/text-generation) para generación de texto y la [página de tarea](https://huggingface.co/tasks/fill-mask) para modelos de lenguajes por enmascaramiento para obtener más información sobre los modelos, datasets, y métricas asociadas. </Tip> ## Carga el dataset ELI5 Carga solo los primeros 5000 registros desde la biblioteca 🤗 Datasets, dado que es bastante grande: ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("eli5", split="train_asks[:5000]") ``` Divide este dataset en subdatasets para el entrenamiento y el test: ```py eli5 = eli5.train_test_split(test_size=0.2) ``` Luego observa un ejemplo: ```py >>> eli5["train"][0] {'answers': {'a_id': ['c3d1aib', 'c3d4lya'], 'score': [6, 3], 'text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"]}, 'answers_urls': {'url': []}, 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls': {'url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg']}, 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls': {'url': []}} ``` Observa que `text` es un subcampo anidado dentro del diccionario `answers`. Cuando preproceses el dataset, deberás extraer el subcampo `text` en una columna aparte. ## Preprocesamiento <Youtube id="ma1TrR7gE7I"/> Para modelados de lenguaje causales carga el tokenizador DistilGPT2 para procesar el subcampo `text`: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") ``` <Youtube id="8PmhEIXhBvI"/> Para modelados de lenguaje por enmascaramiento carga el tokenizador DistilRoBERTa, en lugar de DistilGPT2: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base") ``` Extrae el subcampo `text` desde su estructura anidado con el método [`flatten`](https://huggingface.co/docs/datasets/process#flatten): ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'answers.a_id': ['c3d1aib', 'c3d4lya'], 'answers.score': [6, 3], 'answers.text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"], 'answers_urls.url': [], 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls.url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg'], 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls.url': []} ``` Cada subcampo es ahora una columna separada, como lo indica el prefijo `answers`. Observa que `answers.text` es una lista. En lugar de tokenizar cada enunciado por separado, convierte la lista en un string para tokenizarlos conjuntamente. Así es como puedes crear una función de preprocesamiento para convertir la lista en una cadena y truncar las secuencias para que no superen la longitud máxima de input de DistilGPT2: ```py >>> def preprocess_function(examples): ... return tokenizer([" ".join(x) for x in examples["answers.text"]], truncation=True) ``` Usa de 🤗 Datasets la función [`map`](https://huggingface.co/docs/datasets/process#map) para aplicar la función de preprocesamiento sobre el dataset en su totalidad. 
Puedes acelerar la función `map` configurando el argumento `batched=True` para procesar múltiples elementos del dataset a la vez y aumentar la cantidad de procesos con `num_proc`. Elimina las columnas que no necesitas: ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... remove_columns=eli5["train"].column_names, ... ) ``` Ahora necesitas una segunda función de preprocesamiento para capturar el texto truncado de cualquier ejemplo demasiado largo para evitar cualquier pérdida de información. Esta función de preprocesamiento debería: - Concatenar todo el texto. - Dividir el texto concatenado en trozos más pequeños definidos por un `block_size`. ```py >>> block_size = 128 >>> def group_texts(examples): ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... total_length = (total_length // block_size) * block_size ... result = { ... k: [t[i : i + block_size] for i in range(0, total_length, block_size)] ... for k, t in concatenated_examples.items() ... } ... result["labels"] = result["input_ids"].copy() ... return result ``` Aplica la función `group_texts` sobre todo el dataset: ```py >>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4) ``` Para modelados de lenguaje causales, usa [`DataCollatorForLanguageModeling`] para crear un lote de ejemplos. Esto también *rellenará dinámicamente* tu texto a la dimensión del elemento más largo del lote para que de esta manera tengan largo uniforme. Si bien es posible rellenar tu texto en la función `tokenizer` mediante el argumento `padding=True`, el rellenado dinámico es más eficiente. <frameworkcontent> <pt> Puedes usar el token de final de secuencia como el token de relleno y asignar `mlm=False`. Esto usará los inputs como etiquetas movidas un elemento hacia la derecha: ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) ``` Para modelados de lenguaje por enmascaramiento usa el mismo [`DataCollatorForLanguageModeling`] excepto que deberás especificar `mlm_probability` para enmascarar tokens aleatoriamente cada vez que iteras sobre los datos. ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15) ``` </pt> <tf> Puedes usar el token de final de secuencia como el token de relleno y asignar `mlm=False`. Esto usará los inputs como etiquetas movidas un elemento hacia la derecha: ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf") ``` Para modelados de lenguajes por enmascaramiento usa el mismo [`DataCollatorForLanguageModeling`] excepto que deberás especificar `mlm_probability` para enmascarar tokens aleatoriamente cada vez que iteras sobre los datos. ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="tf") ``` </tf> </frameworkcontent> ## Modelado de lenguaje causal El modelado de lenguaje causal es frecuentemente utilizado para generación de texto. 
Esta sección te muestra cómo realizar fine-tuning a [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) para generar nuevo texto. ### Entrenamiento <frameworkcontent> <pt> Carga DistilGPT2 con [`AutoModelForCausalLM`]: ```py >>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` <Tip> Si no estás familiarizado con el proceso de realizar fine-tuning sobre un modelo con [`Trainer`], considera el tutorial básico [aquí](../training#finetune-with-trainer)! </Tip> A este punto, solo faltan tres pasos: 1. Definir tus hiperparámetros de entrenamiento en [`TrainingArguments`]. 2. Pasarle los argumentos de entrenamiento a [`Trainer`] junto con el modelo, dataset, y el data collator. 3. Realiza la llamada [`~Trainer.train`] para realizar el fine-tuning sobre tu modelo. ```py >>> training_args = TrainingArguments( ... output_dir="./results", ... eval_strategy="epoch", ... learning_rate=2e-5, ... weight_decay=0.01, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... ) >>> trainer.train() ``` </pt> <tf> Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator: ```py >>> tf_train_set = lm_dataset["train"].to_tf_dataset( ... columns=["attention_mask", "input_ids", "labels"], ... dummy_labels=True, ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = lm_dataset["test"].to_tf_dataset( ... columns=["attention_mask", "input_ids", "labels"], ... dummy_labels=True, ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` <Tip> Si no estás familiarizado con realizar fine-tuning de tus modelos con Keras, considera el tutorial básico [aquí](training#finetune-with-keras)! </Tip> Crea la función optimizadora, la tasa de aprendizaje, y algunos hiperparámetros de entrenamiento: ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` Carga DistilGPT2 con [`TFAutoModelForCausalLM`]: ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` Configura el modelo para entrenamiento con [`compile`](https://keras.io/api/models/model_training_apis/#compile-method): ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) ``` Llama a [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para realizar el fine-tuning del modelo: ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3) ``` </tf> </frameworkcontent> ## Modelado de lenguaje por enmascaramiento El modelado de lenguaje por enmascaramiento es también conocido como una tarea de rellenar la máscara, pues predice un token enmascarado dada una secuencia. Los modelos de lenguaje por enmascaramiento requieren una buena comprensión del contexto de una secuencia entera, en lugar de solo el contexto a la izquierda. 
Esta sección te enseña cómo realizar el fine-tuning de [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) para predecir una palabra enmascarada. ### Entrenamiento <frameworkcontent> <pt> Carga DistilRoBERTa con [`AutoModelForMaskedLM`]: ```py >>> from transformers import AutoModelForMaskedLM >>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base") ``` <Tip> Si no estás familiarizado con el proceso de realizar fine-tuning sobre un modelo con [`Trainer`], considera el tutorial básico [aquí](../training#finetune-with-trainer)! </Tip> A este punto, solo faltan tres pasos: 1. Definir tus hiperparámetros de entrenamiento en [`TrainingArguments`]. 2. Pasarle los argumentos de entrenamiento a [`Trainer`] junto con el modelo, dataset, y el data collator. 3. Realiza la llamada [`~Trainer.train`] para realizar el fine-tuning de tu modelo. ```py >>> training_args = TrainingArguments( ... output_dir="./results", ... eval_strategy="epoch", ... learning_rate=2e-5, ... num_train_epochs=3, ... weight_decay=0.01, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... ) >>> trainer.train() ``` </pt> <tf> Para realizar el fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.to_tf_dataset). Especifica los inputs y etiquetas en `columns`, ya sea para mezclar el dataset, tamaño de lote, y el data collator: ```py >>> tf_train_set = lm_dataset["train"].to_tf_dataset( ... columns=["attention_mask", "input_ids", "labels"], ... dummy_labels=True, ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = lm_dataset["test"].to_tf_dataset( ... columns=["attention_mask", "input_ids", "labels"], ... dummy_labels=True, ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` <Tip> Si no estás familiarizado con realizar fine-tuning de tus modelos con Keras, considera el tutorial básico [aquí](training#finetune-with-keras)! </Tip> Crea la función optimizadora, la tasa de aprendizaje, y algunos hiperparámetros de entrenamiento: ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` Carga DistilRoBERTa con [`TFAutoModelForMaskedLM`]: ```py >>> from transformers import TFAutoModelForMaskedLM >>> model = TFAutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base") ``` Configura el modelo para entrenamiento con [`compile`](https://keras.io/api/models/model_training_apis/#compile-method): ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) ``` Llama a [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para realizar el fine-tuning del modelo: ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3) ``` </tf> </frameworkcontent> <Tip> Para un ejemplo más profundo sobre cómo realizar el fine-tuning sobre un modelo de lenguaje causal, considera [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) o [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). </Tip>
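A quick way to sanity-check the fine-tuned models is to run inference with the `pipeline` API. The snippet below is only a minimal sketch: it loads the base DistilGPT2 and DistilRoBERTa checkpoints so that it runs as-is, and assumes you would swap in the (hypothetical) directory where your own fine-tuned model and tokenizer were saved, for example the `output_dir` used above after calling `trainer.save_model()`.

```py
from transformers import pipeline

# Causal LM: generate a continuation. Replace the checkpoint name with your
# fine-tuned output directory (hypothetical path) to test your own model.
generator = pipeline("text-generation", model="distilbert/distilgpt2")
print(generator("The Moon orbits the Earth because", max_new_tokens=30)[0]["generated_text"])

# Masked LM: fill in the blank. DistilRoBERTa uses "<mask>" as its mask token.
fill_mask = pipeline("fill-mask", model="distilbert/distilroberta-base")
for prediction in fill_mask("The Milky Way is a <mask> galaxy."):
    print(prediction["token_str"], round(prediction["score"], 3))
```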
transformers/docs/source/es/tasks/language_modeling.md/0
{ "file_path": "transformers/docs/source/es/tasks/language_modeling.md", "repo_id": "transformers", "token_count": 6287 }
466
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # अनुमान के लिए पाइपलाइन [`pipeline`] किसी भी भाषा, कंप्यूटर दृष्टि, भाषण और मल्टीमॉडल कार्यों पर अनुमान लगाने के लिए [Hub](https://huggingface.co/models) से किसी भी मॉडल का उपयोग करना आसान बनाता है। भले ही आपके पास किसी विशिष्ट तौर-तरीके का अनुभव न हो या आप मॉडलों के पीछे अंतर्निहित कोड से परिचित न हों, फिर भी आप [`pipeline`] के अनुमान के लिए उनका उपयोग कर सकते हैं! यह ट्यूटोरियल आपको ये सिखाएगा: * अनुमान के लिए [`pipeline`] का उपयोग करें। * एक विशिष्ट टोकननाइज़र या मॉडल का उपयोग करें। * ऑडियो, विज़न और मल्टीमॉडल कार्यों के लिए [`pipeline`] का उपयोग करें। <Tip> समर्थित कार्यों और उपलब्ध मापदंडों की पूरी सूची के लिए [`pipeline`] दस्तावेज़ पर एक नज़र डालें। </Tip> ## पाइपलाइन का उपयोग जबकि प्रत्येक कार्य में एक संबद्ध [`pipeline`] होता है, सामान्य [`pipeline`] अमूर्त का उपयोग करना आसान होता है जिसमें शामिल होता है सभी कार्य-विशिष्ट पाइपलाइनें। [`pipeline`] स्वचालित रूप से एक डिफ़ॉल्ट मॉडल और सक्षम प्रीप्रोसेसिंग क्लास लोड करता है आपके कार्य के लिए अनुमान का. आइए स्वचालित वाक् पहचान (एएसआर) के लिए [`pipeline`] का उपयोग करने का उदाहरण लें, या वाक्-से-पाठ. 1. एक [`pipeline`] बनाकर प्रारंभ करें और अनुमान कार्य निर्दिष्ट करें: ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition") ``` 2. अपना इनपुट [`pipeline`] पर भेजें। वाक् पहचान के मामले में, यह एक ऑडियो इनपुट फ़ाइल है: ```py >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'} ``` क्या वह परिणाम नहीं जो आपके मन में था? कुछ [सबसे अधिक डाउनलोड किए गए स्वचालित वाक् पहचान मॉडल](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) देखें यह देखने के लिए हब पर जाएं कि क्या आपको बेहतर ट्रांस्क्रिप्शन मिल सकता है। आइए OpenAI से [व्हिस्पर लार्ज-v2](https://huggingface.co/openai/whisper-large) मॉडल आज़माएं। व्हिस्पर जारी किया गया Wav2Vec2 की तुलना में 2 साल बाद, और लगभग 10 गुना अधिक डेटा पर प्रशिक्षित किया गया था। इस प्रकार, यह अधिकांश डाउनस्ट्रीम पर Wav2Vec2 को मात देता है बेंचमार्क. इसमें विराम चिह्न और आवरण की भविष्यवाणी करने का अतिरिक्त लाभ भी है, जिनमें से कोई भी संभव नहीं है Wav2Vec2. आइए इसे यहां आज़माकर देखें कि यह कैसा प्रदर्शन करता है: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` अब यह परिणाम अधिक सटीक दिखता है! 
Wav2Vec2 बनाम व्हिस्पर पर गहन तुलना के लिए, [ऑडियो ट्रांसफॉर्मर्स कोर्स](https://huggingface.co/learn/audio-course/chapter5/asr_models) देखें। हम वास्तव में आपको विभिन्न भाषाओं में मॉडल, आपके क्षेत्र में विशेषीकृत मॉडल और बहुत कुछ के लिए हब की जांच करने के लिए प्रोत्साहित करते हैं। आप हब पर सीधे अपने ब्राउज़र से मॉडल परिणामों की जांच और तुलना कर सकते हैं कि यह फिट बैठता है या नहीं अन्य मामलों की तुलना में कोने के मामलों को बेहतर ढंग से संभालता है। और यदि आपको अपने उपयोग के मामले के लिए कोई मॉडल नहीं मिलता है, तो आप हमेशा अपना खुद का [प्रशिक्षण](training) शुरू कर सकते हैं! यदि आपके पास कई इनपुट हैं, तो आप अपने इनपुट को एक सूची के रूप में पास कर सकते हैं: ```py transcriber( [ "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac", "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", ] ) ``` पाइपलाइनें प्रयोग के लिए बहुत अच्छी हैं क्योंकि एक मॉडल से दूसरे मॉडल पर स्विच करना मामूली काम है; हालाँकि, प्रयोग की तुलना में बड़े कार्यभार के लिए उन्हें अनुकूलित करने के कुछ तरीके हैं। संपूर्ण डेटासेट पर पुनरावृत्ति करने या वेबसर्वर में पाइपलाइनों का उपयोग करने के बारे में निम्नलिखित मार्गदर्शिकाएँ देखें: दस्तावेज़ों में से: * [डेटासेट पर पाइपलाइनों का उपयोग करना](#using-pipelines-on-a-dataset) * [वेबसर्वर के लिए पाइपलाइनों का उपयोग करना](./pipeline_webserver) ## प्राचल [`pipeline`] कई मापदंडों का समर्थन करता है; कुछ कार्य विशिष्ट हैं, और कुछ सभी पाइपलाइनों के लिए सामान्य हैं। सामान्य तौर पर, आप अपनी इच्छानुसार कहीं भी पैरामीटर निर्दिष्ट कर सकते हैं: ```py transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1) out = transcriber(...) # This will use `my_parameter=1`. out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`. out = transcriber(...) # This will go back to using `my_parameter=1`. ``` आइए 3 महत्वपूर्ण बातों पर गौर करें: ### उपकरण यदि आप `device=0` का उपयोग करते हैं, तो पाइपलाइन स्वचालित रूप से मॉडल को निर्दिष्ट डिवाइस पर डाल देती है। यह इस पर ध्यान दिए बिना काम करेगा कि आप PyTorch या Tensorflow का उपयोग कर रहे हैं या नहीं। ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0) ``` यदि मॉडल एकल GPU के लिए बहुत बड़ा है और आप PyTorch का उपयोग कर रहे हैं, तो आप `device_map="auto"` को स्वचालित रूप से सेट कर सकते हैं निर्धारित करें कि मॉडल वज़न को कैसे लोड और संग्रहीत किया जाए। `device_map` तर्क का उपयोग करने के लिए 🤗 [Accelerate](https://huggingface.co/docs/accelerate) की आवश्यकता होती है पैकेट: ```bash pip install --upgrade accelerate ``` निम्नलिखित कोड स्वचालित रूप से सभी डिवाइसों में मॉडल भार को लोड और संग्रहीत करता है: ```py transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto") ``` ध्यान दें कि यदि `device_map='auto'` पारित हो गया है, तो अपनी `pipeline` को चालू करते समय `device=device` तर्क जोड़ने की कोई आवश्यकता नहीं है क्योंकि आपको कुछ अप्रत्याशित व्यवहार का सामना करना पड़ सकता है! 
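As a small illustrative sketch (not part of the original guide), the `device` argument can be chosen at runtime so the same code runs on both CPU-only and GPU machines; for pipelines, `device=-1` selects the CPU and `device=0` (or higher) selects a GPU index. As noted above, do not combine an explicit `device` with `device_map="auto"`.

```py
import torch
from transformers import pipeline

# Use the first GPU if one is available, otherwise fall back to the CPU (-1).
device = 0 if torch.cuda.is_available() else -1
transcriber = pipeline(model="openai/whisper-large-v2", device=device)
```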
### बैच का आकार डिफ़ॉल्ट रूप से, पाइपलाइनें [यहां](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching) विस्तार से बताए गए कारणों के लिए बैच अनुमान नहीं लगाएंगी। इसका कारण यह है कि बैचिंग आवश्यक रूप से तेज़ नहीं है, और वास्तव में कुछ मामलों में काफी धीमी हो सकती है। लेकिन अगर यह आपके उपयोग के मामले में काम करता है, तो आप इसका उपयोग कर सकते हैं: ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2) audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)] texts = transcriber(audio_filenames) ``` यह प्रदान की गई 4 ऑडियो फाइलों पर पाइपलाइन चलाता है, लेकिन यह उन्हें 2 के बैच में पास करेगा आपसे किसी और कोड की आवश्यकता के बिना मॉडल (जो एक जीपीयू पर है, जहां बैचिंग से मदद मिलने की अधिक संभावना है) पर जाएं। आउटपुट हमेशा उसी से मेल खाना चाहिए जो आपको बैचिंग के बिना प्राप्त हुआ होगा। इसका उद्देश्य केवल पाइपलाइन से अधिक गति प्राप्त करने में आपकी सहायता करना है। पाइपलाइनें बैचिंग की कुछ जटिलताओं को भी कम कर सकती हैं क्योंकि, कुछ पाइपलाइनों के लिए, एक एकल आइटम (जैसे एक लंबी ऑडियो फ़ाइल) को एक मॉडल द्वारा संसाधित करने के लिए कई भागों में विभाजित करने की आवश्यकता होती है। पाइपलाइन आपके लिए यह [*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching) करती है। ### कार्य विशिष्ट प्राचल सभी कार्य कार्य विशिष्ट प्राचल प्रदान करते हैं जो आपको अपना काम पूरा करने में मदद करने के लिए अतिरिक्त लचीलेपन और विकल्पों की अनुमति देते हैं। उदाहरण के लिए, [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] विधि में एक `return_timestamps` प्राचल है जो वीडियो उपशीर्षक के लिए आशाजनक लगता है: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]} ``` जैसा कि आप देख सकते हैं, मॉडल ने पाठ का अनुमान लगाया और **when** विभिन्न वाक्यों का उच्चारण किया गया तो आउटपुट भी दिया। प्रत्येक कार्य के लिए कई प्राचल उपलब्ध हैं, इसलिए यह देखने के लिए कि आप किसके साथ छेड़छाड़ कर सकते हैं, प्रत्येक कार्य का API संदर्भ देखें! उदाहरण के लिए, [`~transformers.AutomaticSpeechRecognitionPipeline`] में एक `chunk_length_s` प्राचल है जो सहायक है वास्तव में लंबी ऑडियो फ़ाइलों पर काम करने के लिए (उदाहरण के लिए, संपूर्ण फिल्मों या घंटे-लंबे वीडियो को उपशीर्षक देना) जो आमतौर पर एक मॉडल होता है अपने आप संभाल नहीं सकता: ```python >>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30, return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/sanchit-gandhi/librispeech_long/resolve/main/audio.wav") {'text': " Chapter 16. I might have told you of the beginning of this liaison in a few lines, but I wanted you to see every step by which we came. I, too, agree to whatever Marguerite wished, Marguerite to be unable to live apart from me. It was the day after the evening... ``` यदि आपको कोई ऐसा पैरामीटर नहीं मिल रहा है जो वास्तव में आपकी मदद करेगा, तो बेझिझक [अनुरोध करें](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)! 
## डेटासेट पर पाइपलाइनों का उपयोग करना पाइपलाइन बड़े डेटासेट पर भी अनुमान चला सकती है। ऐसा करने का सबसे आसान तरीका हम एक पुनरावर्तक का उपयोग करने की सलाह देते हैं: ```py def data(): for i in range(1000): yield f"My example {i}" pipe = pipeline(model="openai-community/gpt2", device=0) generated_characters = 0 for out in pipe(data()): generated_characters += len(out[0]["generated_text"]) ``` पुनरावर्तक `data()` प्रत्येक परिणाम और पाइपलाइन स्वचालित रूप से उत्पन्न करता है पहचानता है कि इनपुट पुनरावर्तनीय है और डेटा प्राप्त करना शुरू कर देगा यह इसे GPU पर प्रोसेस करना जारी रखता है (यह हुड के तहत [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) का उपयोग करता है)। यह महत्वपूर्ण है क्योंकि आपको संपूर्ण डेटासेट के लिए मेमोरी आवंटित करने की आवश्यकता नहीं है और आप जितनी जल्दी हो सके GPU को फीड कर सकते हैं। चूंकि बैचिंग से चीज़ें तेज़ हो सकती हैं, इसलिए यहां `batch_size` प्राचल को ट्यून करने का प्रयास करना उपयोगी हो सकता है। किसी डेटासेट पर पुनरावृति करने का सबसे सरल तरीका बस एक को 🤗 [Dataset](https://github.com/huggingface/datasets/) से लोड करना है: ```py # KeyDataset is a util that will just output the item we're interested in. from transformers.pipelines.pt_utils import KeyDataset from datasets import load_dataset pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]") for out in pipe(KeyDataset(dataset, "audio")): print(out) ``` ## वेबसर्वर के लिए पाइपलाइनों का उपयोग करना <Tip> एक अनुमान इंजन बनाना एक जटिल विषय है जो अपने आप में उपयुक्त है पृष्ठ। </Tip> [Link](./pipeline_webserver) ## विज़न पाइपलाइन दृष्टि कार्यों के लिए [`pipeline`] का उपयोग करना व्यावहारिक रूप से समान है। अपना कार्य निर्दिष्ट करें और अपनी छवि क्लासिफायरियर को भेजें। छवि एक लिंक, एक स्थानीय पथ या बेस64-एन्कोडेड छवि हो सकती है। उदाहरण के लिए, बिल्ली की कौन सी प्रजाति नीचे दिखाई गई है? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(model="google/vit-base-patch16-224") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` ## पाठ पाइपलाइन NLP कार्यों के लिए [`pipeline`] का उपयोग करना व्यावहारिक रूप से समान है। ```py >>> from transformers import pipeline >>> # This model is a `zero-shot-classification` model. >>> # It will classify text, except you are free to choose any label you might imagine >>> classifier = pipeline(model="facebook/bart-large-mnli") >>> classifier( ... "I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], ... 
) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} ``` ## बहुविध पाइपलाइन [`pipeline`] एक से अधिक तौर-तरीकों का समर्थन करती है। उदाहरण के लिए, एक दृश्य प्रश्न उत्तर (VQA) कार्य पाठ और छवि को जोड़ता है। अपनी पसंद के किसी भी छवि लिंक और छवि के बारे में कोई प्रश्न पूछने के लिए स्वतंत्र महसूस करें। छवि एक URL या छवि का स्थानीय पथ हो सकती है। उदाहरण के लिए, यदि आप इस [invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png) का उपयोग करते हैं: ```py >>> from transformers import pipeline >>> vqa = pipeline(model="impira/layoutlm-document-qa") >>> output = vqa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... ) >>> output[0]["score"] = round(output[0]["score"], 3) >>> output [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` <Tip> ऊपर दिए गए उदाहरण को चलाने के लिए आपको 🤗 ट्रांसफॉर्मर के अलावा [`pytesseract`](https://pypi.org/project/pytesseract/) इंस्टॉल करना होगा: ```bash sudo apt install -y tesseract-ocr pip install pytesseract ``` </Tip> ## 🤗 `त्वरण` के साथ बड़े मॉडलों पर `pipeline` का उपयोग करना: आप 🤗 `accelerate` का उपयोग करके बड़े मॉडलों पर आसानी से `pipeline` चला सकते हैं! पहले सुनिश्चित करें कि आपने `accelerate` को `pip install accelerate` के साथ इंस्टॉल किया है। सबसे पहले `device_map='auto'` का उपयोग करके अपना मॉडल लोड करें! हम अपने उदाहरण के लिए `facebook/opt-1.3b` का उपयोग करेंगे। ```py # pip install accelerate import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto") output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` यदि आप `bitsandbytes` इंस्टॉल करते हैं और `load_in_8bit=True` तर्क जोड़ते हैं तो आप 8-बिट लोडेड मॉडल भी पास कर सकते हैं ```py # pip install accelerate bitsandbytes import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True}) output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` ध्यान दें कि आप चेकपॉइंट को किसी भी हगिंग फेस मॉडल से बदल सकते हैं जो BLOOM जैसे बड़े मॉडल लोडिंग का समर्थन करता है!
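As a closing sketch (the checkpoint name below is just an example), `pipeline` also accepts already-instantiated model and tokenizer objects instead of a model id, which is handy when you have customized or fine-tuned them in memory.

```py
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

model_name = "distilbert/distilbert-base-uncased-finetuned-sst-2-english"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Pass the objects directly instead of a checkpoint name.
classifier = pipeline(task="sentiment-analysis", model=model, tokenizer=tokenizer)
print(classifier("This pipeline API is easy to use!"))
```

This produces the same result as passing the checkpoint name directly; the only difference is that you instantiate the objects yourself.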
transformers/docs/source/hi/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/hi/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 13936 }
467
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Condividi un modello Gli ultimi due tutorial ti hanno mostrato come puoi fare fine-tuning di un modello con PyTorch, Keras e 🤗 Accelerate per configurazioni distribuite. Il prossimo passo è quello di condividere il tuo modello con la community! In Hugging Face, crediamo nella condivisione della conoscenza e delle risorse in modo da democratizzare l'intelligenza artificiale per chiunque. Ti incoraggiamo a considerare di condividere il tuo modello con la community per aiutare altre persone a risparmiare tempo e risorse. In questo tutorial, imparerai due metodi per la condivisione di un modello trained o fine-tuned nel [Model Hub](https://huggingface.co/models): - Condividi in modo programmatico i tuoi file nell'Hub. - Trascina i tuoi file nell'Hub mediante interfaccia grafica. <iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe> <Tip> Per condividere un modello con la community, hai bisogno di un account su [huggingface.co](https://huggingface.co/join). Puoi anche unirti ad un'organizzazione esistente o crearne una nuova. </Tip> ## Caratteristiche dei repository Ogni repository nel Model Hub si comporta come un tipico repository di GitHub. I nostri repository offrono il versionamento, la cronologia dei commit, e la possibilità di visualizzare le differenze. Il versionamento all'interno del Model Hub è basato su git e [git-lfs](https://git-lfs.github.com/). In altre parole, puoi trattare un modello come un unico repository, consentendo un maggiore controllo degli accessi e maggiore scalabilità. Il controllo delle versioni consente *revisions*, un metodo per appuntare una versione specifica di un modello con un hash di commit, un tag o un branch. Come risultato, puoi caricare una specifica versione di un modello con il parametro `revision`: ```py >>> model = AutoModel.from_pretrained( ... "julien-c/EsperBERTo-small", revision="v2.0.1" # nome di un tag, di un branch, o commit hash ... ) ``` Anche i file possono essere modificati facilmente in un repository ed è possibile visualizzare la cronologia dei commit e le differenze: ![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png) ## Configurazione Prima di condividere un modello nell'Hub, hai bisogno delle tue credenziali di Hugging Face. Se hai accesso ad un terminale, esegui il seguente comando nell'ambiente virtuale in cui è installata la libreria 🤗 Transformers. 
Questo memorizzerà il tuo token di accesso nella cartella cache di Hugging Face (di default `~/.cache/`): ```bash huggingface-cli login ``` Se stai usando un notebook come Jupyter o Colaboratory, assicurati di avere la libreria [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library) installata. Questa libreria ti permette di interagire in maniera programmatica con l'Hub. ```bash pip install huggingface_hub ``` Utilizza `notebook_login` per accedere all'Hub, e segui il link [qui](https://huggingface.co/settings/token) per generare un token con cui effettuare il login: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Converti un modello per tutti i framework Per assicurarti che il tuo modello possa essere utilizzato da persone che lavorano con un framework differente, ti raccomandiamo di convertire e caricare il tuo modello sia con i checkpoint di PyTorch che con quelli di TensorFlow. Anche se è possibile caricare il modello da un framework diverso, se si salta questo passaggio, il caricamento sarà più lento perché 🤗 Transformers ha bisogno di convertire i checkpoint al momento. Convertire un checkpoint per un altro framework è semplice. Assicurati di avere PyTorch e TensorFlow installati (vedi [qui](installation) per le istruzioni d'installazione), e poi trova il modello specifico per il tuo compito nell'altro framework. <frameworkcontent> <pt> Specifica `from_tf=True` per convertire un checkpoint da TensorFlow a PyTorch: ```py >>> pt_model = DistilBertForSequenceClassification.from_pretrained( ... "path/verso/il-nome-magnifico-che-hai-scelto", from_tf=True ... ) >>> pt_model.save_pretrained("path/verso/il-nome-magnifico-che-hai-scelto") ``` </pt> <tf> Specifica `from_pt=True` per convertire un checkpoint da PyTorch a TensorFlow: ```py >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained( ... "path/verso/il-nome-magnifico-che-hai-scelto", from_pt=True ... ) ``` Poi puoi salvare il tuo nuovo modello in TensorFlow con il suo nuovo checkpoint: ```py >>> tf_model.save_pretrained("path/verso/il-nome-magnifico-che-hai-scelto") ``` </tf> <jax> Se un modello è disponibile in Flax, puoi anche convertire un checkpoint da PyTorch a Flax: ```py >>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( ... "path/verso/il-nome-magnifico-che-hai-scelto", from_pt=True ... ) ``` </jax> </frameworkcontent> ## Condividi un modello durante il training <frameworkcontent> <pt> <Youtube id="Z1-XMy-GNLQ"/> Condividere un modello nell'Hub è tanto semplice quanto aggiungere un parametro extra o un callback. Ricorda dal [tutorial sul fine-tuning](training), la classe [`TrainingArguments`] è dove specifichi gli iperparametri e le opzioni addizionali per l'allenamento. Una di queste opzioni di training include l'abilità di condividere direttamente un modello nell'Hub. Imposta `push_to_hub=True` in [`TrainingArguments`]: ```py >>> training_args = TrainingArguments(output_dir="il-mio-bellissimo-modello", push_to_hub=True) ``` Passa gli argomenti per il training come di consueto al [`Trainer`]: ```py >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... ) ``` Dopo aver effettuato il fine-tuning del tuo modello, chiama [`~transformers.Trainer.push_to_hub`] sul [`Trainer`] per condividere il modello allenato nell'Hub. 
🤗 Transformers aggiungerà in modo automatico persino gli iperparametri, i risultati del training e le versioni del framework alla scheda del tuo modello (model card, in inglese)! ```py >>> trainer.push_to_hub() ``` </pt> <tf> Condividi un modello nell'Hub con [`PushToHubCallback`]. Nella funzione [`PushToHubCallback`], aggiungi: - Una directory di output per il tuo modello. - Un tokenizer. - L'`hub_model_id`, che è il tuo username sull'Hub e il nome del modello. ```py >>> from transformers import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="./il_path_dove_salvare_il_tuo_modello", ... tokenizer=tokenizer, ... hub_model_id="il-tuo-username/il-mio-bellissimo-modello", ... ) ``` Aggiungi il callback a [`fit`](https://keras.io/api/models/model_training_apis/), e 🤗 Transformers caricherà il modello allenato nell'Hub: ```py >>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) ``` </tf> </frameworkcontent> ## Utilizzare la funzione `push_to_hub` Puoi anche chiamare `push_to_hub` direttamente sul tuo modello per caricarlo nell'Hub. Specifica il nome del tuo modello in `push_to_hub`: ```py >>> pt_model.push_to_hub("il-mio-bellissimo-modello") ``` Questo crea un repository sotto il proprio username con il nome del modello `il-mio-bellissimo-modello`. Ora chiunque può caricare il tuo modello con la funzione `from_pretrained`: ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("il-tuo-username/il-mio-bellissimo-modello") ``` Se fai parte di un'organizzazione e vuoi invece condividere un modello sotto il nome dell'organizzazione, aggiungi il parametro `organization`: ```py >>> pt_model.push_to_hub("il-mio-bellissimo-modello", organization="la-mia-fantastica-org") ``` La funzione `push_to_hub` può essere anche utilizzata per aggiungere altri file al repository del modello. Per esempio, aggiungi un tokenizer ad un repository di un modello: ```py >>> tokenizer.push_to_hub("il-mio-bellissimo-modello") ``` O magari potresti voler aggiungere la versione di TensorFlow del tuo modello PyTorch a cui hai fatto fine-tuning: ```py >>> tf_model.push_to_hub("il-mio-bellissimo-modello") ``` Ora quando navighi nel tuo profilo Hugging Face, dovresti vedere il tuo repository del modello appena creato. Premendo sulla scheda **Files** vengono visualizzati tutti i file caricati nel repository. Per maggiori dettagli su come creare e caricare file ad un repository, fai riferimento alla documentazione [qui](https://huggingface.co/docs/hub/how-to-upstream). ## Carica un modello utilizzando l'interfaccia web Chi preferisce un approccio senza codice può caricare un modello tramite l'interfaccia web dell'hub. Visita [huggingface.co/new](https://huggingface.co/new) per creare un nuovo repository: ![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png) Da qui, aggiungi alcune informazioni sul tuo modello: - Seleziona il/la **owner** del repository. Puoi essere te o qualunque organizzazione di cui fai parte. - Scegli un nome per il tuo modello, il quale sarà anche il nome del repository. - Scegli se il tuo modello è pubblico o privato. - Specifica la licenza utilizzata per il tuo modello. Ora premi sulla scheda **Files** e premi sul pulsante **Add file** per caricare un nuovo file al tuo repository. Trascina poi un file per caricarlo e aggiungere un messaggio di commit. 
![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png) ## Aggiungi una scheda del modello Per assicurarti che chiunque possa comprendere le abilità, limitazioni, i potenziali bias e le considerazioni etiche del tuo modello, per favore aggiungi una scheda del modello (model card, in inglese) al tuo repository. La scheda del modello è definita nel file `README.md`. Puoi aggiungere una scheda del modello: * Creando manualmente e caricando un file `README.md`. * Premendo sul pulsante **Edit model card** nel repository del tuo modello. Dai un'occhiata alla [scheda del modello](https://huggingface.co/distilbert/distilbert-base-uncased) di DistilBert per avere un buon esempio del tipo di informazioni che una scheda di un modello deve includere. Per maggiori dettagli legati ad altre opzioni che puoi controllare nel file `README.md`, come l'impatto ambientale o widget di esempio, fai riferimento alla documentazione [qui](https://huggingface.co/docs/hub/models-cards).
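A complementary sketch, assuming a recent version of the `huggingface_hub` library that provides the `ModelCard` helper: a model card can also be created and pushed programmatically instead of through the web editor. The repository id and the card contents below are placeholders.

```py
from huggingface_hub import ModelCard

content = """---
language: it
license: apache-2.0
tags:
- text-classification
---

# il-mio-bellissimo-modello

Descrivi qui le capacità, i limiti, i potenziali bias e le considerazioni etiche del modello.
"""

card = ModelCard(content)  # wrap the README content (the YAML block holds the card metadata)
card.push_to_hub("il-tuo-username/il-mio-bellissimo-modello")  # uploaded as README.md
```

Keeping the YAML block at the top of the README is what populates the metadata (language, license, tags) shown on the Hub model page.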
transformers/docs/source/it/model_sharing.md/0
{ "file_path": "transformers/docs/source/it/model_sharing.md", "repo_id": "transformers", "token_count": 4025 }
468
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Esporta modelli 🤗 Transformers Se devi implementare 🤗 modelli Transformers in ambienti di produzione, noi consigliamo di esportarli in un formato serializzato che può essere caricato ed eseguito su runtime e hardware specializzati. In questa guida ti mostreremo come farlo esporta 🤗 Modelli Transformers in due formati ampiamente utilizzati: ONNX e TorchScript. Una volta esportato, un modello può essere ottimizato per l'inferenza tramite tecniche come la quantizzazione e soppressione. Se sei interessato a ottimizzare i tuoi modelli per l'esecuzione con la massima efficienza, dai un'occhiata a [🤗 Optimum library](https://github.com/huggingface/optimum). ## ONNX Il progetto [ONNX (Open Neural Network eXchange)](http://onnx.ai) Il progetto onnx è un open standard che definisce un insieme comune di operatori e un formato di file comune a rappresentano modelli di deep learning in un'ampia varietà di framework, tra cui PyTorch e TensorFlow. Quando un modello viene esportato nel formato ONNX, questi operatori sono usati per costruire un grafico computazionale (often called an _intermediate representation_) che rappresenta il flusso di dati attraverso la rete neurale. Esponendo un grafico con operatori e tipi di dati standardizzati, ONNX rende più facile passare da un framework all'altro. Ad esempio, un modello allenato in PyTorch può essere esportato in formato ONNX e quindi importato in TensorFlow (e viceversa). 🤗 Transformers fornisce un pacchetto `transformers.onnx` che ti consente di convertire i checkpoint del modello in un grafico ONNX sfruttando gli oggetti di configurazione. Questi oggetti di configurazione sono già pronti per una serie di architetture di modelli, e sono progettati per essere facilmente estensibili ad altre architetture. Le configurazioni pronte includono le seguenti architetture: <!--This table is automatically generated by `make fix-copies`, do not fill manually!--> - ALBERT - BART - BEiT - BERT - BigBird - BigBird-Pegasus - Blenderbot - BlenderbotSmall - CamemBERT - ConvBERT - Data2VecText - Data2VecVision - DeiT - DistilBERT - ELECTRA - FlauBERT - GPT Neo - GPT-J - I-BERT - LayoutLM - M2M100 - Marian - mBART - MobileBERT - OpenAI GPT-2 - Perceiver - PLBart - RoBERTa - RoFormer - SqueezeBERT - T5 - ViT - XLM - XLM-RoBERTa - XLM-RoBERTa-XL Nelle prossime due sezioni, ti mostreremo come: * Esporta un modello supportato usando il pacchetto `transformers.onnx`. * Esporta un modello personalizzato per un'architettura non supportata. 
### Esportazione di un modello in ONNX Per esportare un modello 🤗 Transformers in ONNX, dovrai prima installarne alcune dipendenze extra: ```bash pip install transformers[onnx] ``` Il pacchetto `transformers.onnx` può essere usato come modulo Python: ```bash python -m transformers.onnx --help usage: Hugging Face Transformers ONNX exporter [-h] -m MODEL [--feature {causal-lm, ...}] [--opset OPSET] [--atol ATOL] output positional arguments: output Path indicating where to store generated ONNX model. optional arguments: -h, --help show this help message and exit -m MODEL, --model MODEL Model ID on huggingface.co or path on disk to load model from. --feature {causal-lm, ...} The type of features to export the model with. --opset OPSET ONNX opset version to export the model with. --atol ATOL Absolute difference tolerance when validating the model. ``` L'esportazione di un checkpoint utilizzando una configurazione già pronta può essere eseguita come segue: ```bash python -m transformers.onnx --model=distilbert/distilbert-base-uncased onnx/ ``` che dovrebbe mostrare i seguenti log: ```bash Validating ONNX model... -[✓] ONNX model output names match reference model ({'last_hidden_state'}) - Validating ONNX Model output "last_hidden_state": -[✓] (2, 8, 768) matches (2, 8, 768) -[✓] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx ``` Questo esporta un grafico ONNX del checkpoint definito dall'argomento `--model`. In questo esempio è `distilbert/distilbert-base-uncased`, ma può essere qualsiasi checkpoint Hugging Face Hub o uno memorizzato localmente. Il file risultante `model.onnx` può quindi essere eseguito su uno dei [tanti acceleratori](https://onnx.ai/supported-tools.html#deployModel) che supportano il lo standard ONNX. Ad esempio, possiamo caricare ed eseguire il modello con [ONNX Runtime](https://onnxruntime.ai/) come segue: ```python >>> from transformers import AutoTokenizer >>> from onnxruntime import InferenceSession >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") >>> session = InferenceSession("onnx/model.onnx") >>> # ONNX Runtime expects NumPy arrays as input >>> inputs = tokenizer("Using DistilBERT with ONNX Runtime!", return_tensors="np") >>> outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs)) ``` I nomi di output richiesti (cioè `["last_hidden_state"]`) possono essere ottenuti dando un'occhiata alla configurazione ONNX di ogni modello. Ad esempio, per DistilBERT abbiamo: ```python >>> from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig >>> config = DistilBertConfig() >>> onnx_config = DistilBertOnnxConfig(config) >>> print(list(onnx_config.outputs.keys())) ["last_hidden_state"] ``` Il processo è identico per i checkpoint TensorFlow sull'hub. Ad esempio, noi possiamo esportare un checkpoint TensorFlow puro da [Keras organizzazione](https://huggingface.co/keras-io) come segue: ```bash python -m transformers.onnx --model=keras-io/transformers-qa onnx/ ``` Per esportare un modello memorizzato localmente, devi disporre dei pesi del modello e file tokenizer memorizzati in una directory. 
Ad esempio, possiamo caricare e salvare un checkpoint come segue: <frameworkcontent> <pt> ```python >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> # Load tokenizer and PyTorch weights form the Hub >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") >>> pt_model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") >>> # Save to disk >>> tokenizer.save_pretrained("local-pt-checkpoint") >>> pt_model.save_pretrained("local-pt-checkpoint") ``` Una volta salvato il checkpoint, possiamo esportarlo su ONNX puntando l'argomento `--model` del pacchetto `transformers.onnx` nella directory desiderata: ```bash python -m transformers.onnx --model=local-pt-checkpoint onnx/ ``` </pt> <tf> ```python >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> # Load tokenizer and TensorFlow weights from the Hub >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") >>> # Save to disk >>> tokenizer.save_pretrained("local-tf-checkpoint") >>> tf_model.save_pretrained("local-tf-checkpoint") ``` Once the checkpoint is saved, we can export it to ONNX by pointing the `--model` argument of the `transformers.onnx` package to the desired directory: ```bash python -m transformers.onnx --model=local-tf-checkpoint onnx/ ``` </tf> </frameworkcontent> ### Selezione delle caratteristiche per diverse topologie di modello Ogni configurazione già pronta viene fornita con una serie di _caratteristiche_ che ti consentono di esportare modelli per diversi tipi di topologie o attività. Come mostrato nella tabella di seguito, ogni caratteristica è associata a una diversa Auto Class: | Caratteristica | Auto Class | | ------------------------------------ | ------------------------------------ | | `causal-lm`, `causal-lm-with-past` | `AutoModelForCausalLM` | | `default`, `default-with-past` | `AutoModel` | | `masked-lm` | `AutoModelForMaskedLM` | | `question-answering` | `AutoModelForQuestionAnswering` | | `seq2seq-lm`, `seq2seq-lm-with-past` | `AutoModelForSeq2SeqLM` | | `sequence-classification` | `AutoModelForSequenceClassification` | | `token-classification` | `AutoModelForTokenClassification` | Per ciascuna configurazione, puoi trovare l'elenco delle funzionalità supportate tramite il `FeaturesManager`. Ad esempio, per DistilBERT abbiamo: ```python >>> from transformers.onnx.features import FeaturesManager >>> distilbert_features = list(FeaturesManager.get_supported_features_for_model_type("distilbert").keys()) >>> print(distilbert_features) ["default", "masked-lm", "causal-lm", "sequence-classification", "token-classification", "question-answering"] ``` Puoi quindi passare una di queste funzionalità all'argomento `--feature` nel pacchetto `transformers.onnx`. Ad esempio, per esportare un modello di classificazione del testo possiamo scegliere un modello ottimizzato dall'Hub ed eseguire: ```bash python -m transformers.onnx --model=distilbert/distilbert-base-uncased-finetuned-sst-2-english \ --feature=sequence-classification onnx/ ``` che visualizzerà i seguenti registri: ```bash Validating ONNX model... 
-[✓] ONNX model output names match reference model ({'logits'}) - Validating ONNX Model output "logits": -[✓] (2, 2) matches (2, 2) -[✓] all values close (atol: 1e-05) All good, model saved at: onnx/model.onnx ``` Puoi notare che in questo caso, i nomi di output del modello ottimizzato sono `logits` invece di `last_hidden_state` che abbiamo visto con il checkpoint `distilbert/distilbert-base-uncased` precedente. Questo è previsto dal modello ottimizato visto che ha una testa di e. <Tip> Le caratteristiche che hanno un suffisso `wtih-past` (ad es. `causal-lm-with-past`) corrispondono a topologie di modello con stati nascosti precalcolati (chiave e valori nei blocchi di attenzione) che possono essere utilizzati per la decodifica autoregressiva veloce. </Tip> ### Esportazione di un modello per un'architettura non supportata Se desideri esportare un modello la cui architettura non è nativamente supportata dalla libreria, ci sono tre passaggi principali da seguire: 1. Implementare una configurazione ONNX personalizzata. 2. Esportare il modello in ONNX. 3. Convalidare gli output di PyTorch e dei modelli esportati. In questa sezione, vedremo come DistilBERT è stato implementato per mostrare cosa è coinvolto in ogni passaggio. #### Implementazione di una configurazione ONNX personalizzata Iniziamo con l'oggetto di configurazione ONNX. Forniamo tre classi astratte da cui ereditare, a seconda del tipo di archittettura del modello che desideri esportare: * I modelli basati su encoder ereditano da [`~onnx.config.OnnxConfig`] * I modelli basati su decoder ereditano da [`~onnx.config.OnnxConfigWithPast`] * I modelli encoder-decoder ereditano da[`~onnx.config.OnnxSeq2SeqConfigWithPast`] <Tip> Un buon modo per implementare una configurazione ONNX personalizzata è guardare l'implementazione esistente nel file `configuration_<model_name>.py` di un'architettura simile. </Tip> Poiché DistilBERT è un modello basato su encoder, la sua configurazione eredita da `OnnxConfig`: ```python >>> from typing import Mapping, OrderedDict >>> from transformers.onnx import OnnxConfig >>> class DistilBertOnnxConfig(OnnxConfig): ... @property ... def inputs(self) -> Mapping[str, Mapping[int, str]]: ... return OrderedDict( ... [ ... ("input_ids", {0: "batch", 1: "sequence"}), ... ("attention_mask", {0: "batch", 1: "sequence"}), ... ] ... ) ``` Ogni oggetto di configurazione deve implementare la proprietà `inputs` e restituire una mappatura, dove ogni chiave corrisponde a un input previsto e ogni valore indica l'asse di quell'input. Per DistilBERT, possiamo vedere che sono richiesti due input: `input_ids` e `attention_mask`. Questi inputs hanno la stessa forma di `(batch_size, sequence_length)` per questo motivo vediamo gli stessi assi usati nella configurazione. <Tip> Puoi notare che la proprietà `inputs` per `DistilBertOnnxConfig` restituisce un `OrdinatoDict`. Ciò garantisce che gli input corrispondano alla loro posizione relativa all'interno del metodo `PreTrainedModel.forward()` durante il tracciamento del grafico. Raccomandiamo di usare un `OrderedDict` per le proprietà `inputs` e `outputs` quando si implementano configurazioni ONNX personalizzate. </Tip> Dopo aver implementato una configurazione ONNX, è possibile istanziarla fornendo alla configurazione del modello base come segue: ```python >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased") >>> onnx_config = DistilBertOnnxConfig(config) ``` L'oggetto risultante ha diverse proprietà utili. 
Ad esempio è possibile visualizzare il Set operatore ONNX che verrà utilizzato durante l'esportazione: ```python >>> print(onnx_config.default_onnx_opset) 11 ``` È inoltre possibile visualizzare gli output associati al modello come segue: ```python >>> print(onnx_config.outputs) OrderedDict([("last_hidden_state", {0: "batch", 1: "sequence"})]) ``` Puoi notare che la proprietà degli output segue la stessa struttura degli input; esso restituisce un `OrderedDict` di output con nome e le loro forme. La struttura di output è legato alla scelta della funzione con cui viene inizializzata la configurazione. Per impostazione predefinita, la configurazione ONNX viene inizializzata con la funzione 'predefinita' che corrisponde all'esportazione di un modello caricato con la classe `AutoModel`. Se tu desideri esportare una topologia di modello diversa, è sufficiente fornire una funzionalità diversa a l'argomento `task` quando inizializzi la configurazione ONNX. Ad esempio, se volevamo esportare DistilBERT con una testa di classificazione per sequenze, potremmo usare: ```python >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased") >>> onnx_config_for_seq_clf = DistilBertOnnxConfig(config, task="sequence-classification") >>> print(onnx_config_for_seq_clf.outputs) OrderedDict([('logits', {0: 'batch'})]) ``` <Tip> Tutte le proprietà e i metodi di base associati a [`~onnx.config.OnnxConfig`] e le altre classi di configurazione possono essere sovrascritte se necessario. Guarda [`BartOnnxConfig`] per un esempio avanzato. </Tip> #### Esportazione del modello Una volta implementata la configurazione ONNX, il passaggio successivo consiste nell'esportare il modello. Qui possiamo usare la funzione `export()` fornita dal pacchetto `transformers.onnx`. Questa funzione prevede la configurazione ONNX, insieme con il modello base e il tokenizer e il percorso per salvare il file esportato: ```python >>> from pathlib import Path >>> from transformers.onnx import export >>> from transformers import AutoTokenizer, AutoModel >>> onnx_path = Path("model.onnx") >>> model_ckpt = "distilbert/distilbert-base-uncased" >>> base_model = AutoModel.from_pretrained(model_ckpt) >>> tokenizer = AutoTokenizer.from_pretrained(model_ckpt) >>> onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path) ``` Gli `onnx_inputs` e `onnx_outputs` restituiti dalla funzione `export()` sono liste di chiavi definite nelle proprietà di `input` e `output` della configurazione. Una volta esportato il modello, puoi verificare che il modello sia ben formato come segue: ```python >>> import onnx >>> onnx_model = onnx.load("model.onnx") >>> onnx.checker.check_model(onnx_model) ``` <Tip> Se il tuo modello è più largo di 2 GB, vedrai che molti file aggiuntivi sono creati durante l'esportazione. Questo è _previsto_ perché ONNX utilizza [Protocol Buffer](https://developers.google.com/protocol-buffers/) per memorizzare il modello e questi hanno un limite di dimensione 2 GB. Vedi la [Documentazione ONNX](https://github.com/onnx/onnx/blob/master/docs/ExternalData.md) per istruzioni su come caricare modelli con dati esterni. </Tip> #### Convalida degli output del modello Il passaggio finale consiste nel convalidare gli output dal modello di base e quello esportato corrispondere entro una soglia di tolleranza assoluta. 
Qui possiamo usare la funzione `validate_model_outputs()` fornita dal pacchetto `transformers.onnx` come segue:

```python
>>> from transformers.onnx import validate_model_outputs

>>> validate_model_outputs(
...     onnx_config, tokenizer, base_model, onnx_path, onnx_outputs, onnx_config.atol_for_validation
... )
```

Questa funzione usa il metodo `OnnxConfig.generate_dummy_inputs()` per generare gli input per il modello di base e per quello esportato; la tolleranza assoluta può essere definita nella configurazione. Generalmente troviamo una corrispondenza numerica nell'intervallo da 1e-6 a 1e-4, anche se è probabile che qualsiasi valore inferiore a 1e-3 vada bene.

### Contribuire con una nuova configurazione a 🤗 Transformers

Stiamo cercando di espandere l'insieme di configurazioni già pronte e di accettare contributi della community! Se vuoi contribuire con la tua aggiunta alla libreria, dovrai:

* Implementare la configurazione ONNX nel file `configuration_<model_name>.py` corrispondente
* Includere l'architettura del modello e le funzioni corrispondenti in [`~onnx.features.FeatureManager`]
* Aggiungere la tua architettura del modello ai test in `test_onnx_v2.py`

Scopri come è stata contribuita la configurazione per [IBERT](https://github.com/huggingface/transformers/pull/14868/files) per avere un'idea di cosa è coinvolto.

## TorchScript

<Tip>

Questo è l'inizio dei nostri esperimenti con TorchScript e stiamo ancora esplorando le sue capacità con modelli con variable-input-size. È una nostra priorità e approfondiremo le nostre analisi nelle prossime versioni, con più esempi di codice, un'implementazione più flessibile e benchmark che confrontano i codici basati su Python con quelli compilati con TorchScript.

</Tip>

Secondo la documentazione di Pytorch: "TorchScript è un modo per creare modelli serializzabili e ottimizzabili da codice Pytorch". I due moduli di Pytorch [JIT e TRACE](https://pytorch.org/docs/stable/jit.html) consentono allo sviluppatore di esportare il proprio modello per riutilizzarlo in altri programmi, come i programmi C++ orientati all'efficienza.

Abbiamo fornito un'interfaccia che consente l'esportazione di modelli 🤗 Transformers in TorchScript in modo che possano essere riutilizzati in un ambiente diverso rispetto a un programma Python basato su Pytorch. Qui spieghiamo come esportare e utilizzare i nostri modelli con TorchScript.

Esportare un modello richiede due cose:

- Un passaggio in avanti con input fittizi.
- L'istanziazione del modello con il flag `torchscript`.

Queste necessità implicano diverse cose a cui gli sviluppatori dovrebbero prestare attenzione; questi dettagli sono illustrati di seguito.

### Flag TorchScript e pesi legati

Questo flag è necessario perché la maggior parte dei modelli linguistici in questo repository ha pesi legati tra lo strato `Embedding` e lo strato `Decoding`. TorchScript non consente l'esportazione di modelli che hanno pesi legati, quindi è necessario prima slegare e clonare i pesi.

Ciò implica che i modelli istanziati con il flag `torchscript` hanno lo strato `Embedding` e lo strato `Decoding` separati, il che significa che non dovrebbero essere addestrati ulteriormente. L'allenamento de-sincronizzerebbe i due strati, portando a risultati inaspettati.

Questo non è il caso per i modelli che non hanno una testa di modello linguistico, poiché quelli non hanno pesi legati. Questi modelli possono essere esportati in sicurezza senza il flag `torchscript`.

### Input fittizi e lunghezze standard

Gli input fittizi sono usati per eseguire un passaggio in avanti del modello.
Mentre i valori degli input si propagano attraverso gli strati, Pytorch tiene traccia delle diverse operazioni eseguite su ciascun tensore. Queste operazioni registrate vengono quindi utilizzate per creare la "traccia" del modello.

La traccia viene creata relativamente alle dimensioni degli input: è quindi vincolata dalle dimensioni dell'input fittizio e non funzionerà per altre lunghezze di sequenza o dimensioni di batch. Quando si prova con una dimensione diversa, viene sollevato un errore come:

`The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2`

Si consiglia pertanto di tracciare il modello con una dimensione di input fittizio grande almeno quanto l'input più grande che verrà fornito al modello durante l'inferenza. È possibile eseguire il padding per riempire i valori mancanti. Il modello verrà tracciato con una dimensione di input grande; tuttavia anche le dimensioni delle diverse matrici saranno grandi, con un conseguente aumento dei calcoli.

Si raccomanda di prestare attenzione al numero totale di operazioni eseguite su ciascun input e di monitorare da vicino le prestazioni durante l'esportazione di modelli con lunghezza di sequenza variabile.

### Usare TorchScript in Python

Di seguito è riportato un esempio che mostra come salvare e caricare i modelli e come utilizzare la traccia per l'inferenza.

#### Salvare un modello

Questo frammento di codice mostra come usare TorchScript per esportare un `BertModel`. Qui il `BertModel` è istanziato secondo una classe `BertConfig` e quindi salvato su disco con il nome di file `traced_bert.pt`.

```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch

enc = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Tokenizing input text
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = enc.tokenize(text)

# Masking one of the input tokens
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
indexed_tokens = enc.convert_tokens_to_ids(tokenized_text)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]

# Creating a dummy input
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
dummy_input = [tokens_tensor, segments_tensors]

# Initializing the model with the torchscript flag
# Flag set to True even though it is not necessary as this model does not have an LM Head.
config = BertConfig(
    vocab_size_or_config_json_file=32000,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    torchscript=True,
)

# Instantiating the model
model = BertModel(config)

# The model needs to be in evaluation mode
model.eval()

# If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag
model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True)

# Creating the trace
traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(traced_model, "traced_bert.pt")
```

#### Caricare un modello

Questo frammento di codice mostra come caricare il `BertModel` che era stato precedentemente salvato su disco con il nome `traced_bert.pt`. Stiamo riutilizzando il `dummy_input` precedentemente inizializzato.
```python loaded_model = torch.jit.load("traced_bert.pt") loaded_model.eval() all_encoder_layers, pooled_output = loaded_model(*dummy_input) ``` #### Utilizzare un modello tracciato per l'inferenza Usare il modello tracciato per l'inferenza è semplice come usare il suo metodo dunder `__call__`: ```python traced_model(tokens_tensor, segments_tensors) ``` ### Implementare modelli HuggingFace TorchScript su AWS utilizzando Neuron SDK AWS ha introdotto [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/) famiglia di istanze per l'inferenza di machine learning a basso costo e ad alte prestazioni nel cloud. Le istanze Inf1 sono alimentate dal chip AWS Inferentia, un acceleratore hardware personalizzato, specializzato in carichi di lavoro di inferenza di deep learning. [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) è l'SDK per Inferentia che supporta il tracciamento e l'ottimizzazione dei modelli transformers per distribuzione su Inf1. L'SDK Neuron fornisce: 1. API di facile utilizzo con una riga di modifica del codice per tracciare e ottimizzare un modello TorchScript per l'inferenza nel cloud. 2. Ottimizzazioni delle prestazioni pronte all'uso per [miglioramento dei costi-prestazioni](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/>) 3. Supporto per i modelli di trasformatori HuggingFace costruiti con [PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html) o [TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html). #### Implicazioni Modelli Transformers basati su architettura [BERT (Bidirectional Encoder Representations from Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert), o sue varianti come [distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) e [roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta) funzioneranno meglio su Inf1 per attività non generative come la question answering estrattive, Classificazione della sequenza, Classificazione dei token. In alternativa, generazione di testo le attività possono essere adattate per essere eseguite su Inf1, secondo questo [tutorial AWS Neuron MarianMT](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html). Ulteriori informazioni sui modelli che possono essere convertiti fuori dagli schemi su Inferentia possono essere trovati nella [sezione Model Architecture Fit della documentazione Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia). #### Dipendenze L'utilizzo di AWS Neuron per convertire i modelli richiede le seguenti dipendenze e l'ambiente: * A [Neuron SDK environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide), which comes pre-configured on [AWS Deep Learning AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html). #### Convertire un modello per AWS Neuron Usando lo stesso script come in [Usando TorchScipt in Python](https://huggingface.co/docs/transformers/main/en/serialization#using-torchscript-in-python) per tracciare un "BertModel", importi l'estensione del framework `torch.neuron` per accedere i componenti di Neuron SDK tramite un'API Python. 
```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch
import torch.neuron
```

Modifica poi soltanto la riga di codice che esegue la traccia.

Da:

```python
torch.jit.trace(model, [tokens_tensor, segments_tensors])
```

A:

```python
torch.neuron.trace(model, [tokens_tensor, segments_tensors])
```

Questa modifica consente a Neuron SDK di tracciare il modello e di ottimizzarlo per l'esecuzione nelle istanze Inf1.

Per ulteriori informazioni sulle funzionalità, sugli strumenti, sui tutorial di esempio e sugli ultimi aggiornamenti di AWS Neuron SDK, consulta la [documentazione AWS NeuronSDK](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html).
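A puro titolo illustrativo, ecco uno sketch minimale (non tratto dalla documentazione ufficiale AWS) che mette insieme i passaggi descritti sopra: preparazione degli input fittizi, tracciamento con `torch.neuron.trace` e salvataggio del modulo compilato. Il nome del file di output è ipotetico e si assume di eseguire il codice su un ambiente con Neuron SDK installato (ad esempio un'istanza Inf1 con AWS Deep Learning AMI).

```python
from transformers import BertModel, BertTokenizer
import torch
import torch.neuron  # assumes the Neuron SDK environment is available

# Same dummy inputs used for the standard TorchScript trace
enc = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
indexed_tokens = enc.convert_tokens_to_ids(enc.tokenize(text))
segments_ids = [0] * 7 + [1] * (len(indexed_tokens) - 7)
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])

# Instantiate the model with the torchscript flag and switch to eval mode
model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True)
model.eval()

# Trace with the Neuron compiler instead of torch.jit.trace
model_neuron = torch.neuron.trace(model, [tokens_tensor, segments_tensors])

# The result is a regular ScriptModule and can be saved to disk
model_neuron.save("bert_neuron.pt")  # hypothetical output filename
```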
transformers/docs/source/it/serialization.md/0
{ "file_path": "transformers/docs/source/it/serialization.md", "repo_id": "transformers", "token_count": 10324 }
469
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Text generation strategies テキスト生成は、オープンエンドのテキスト生成、要約、翻訳など、多くの自然言語処理タスクに不可欠です。また、テキストを出力とするさまざまな混在モダリティアプリケーションにも影響を与えており、例えば音声からテキストへの変換や画像からテキストへの変換などがあります。テキストを生成できるいくつかのモデルには、GPT2、XLNet、OpenAI GPT、CTRL、TransformerXL、XLM、Bart、T5、GIT、Whisperが含まれます。 [`~generation.GenerationMixin.generate`] メソッドを使用して、異なるタスクのテキスト出力を生成するいくつかの例をご紹介します: * [テキスト要約](./tasks/summarization#inference) * [画像のキャプション](./model_doc/git#transformers.GitForCausalLM.forward.example) * [音声の転記](./model_doc/whisper#transformers.WhisperForConditionalGeneration.forward.example) generateメソッドへの入力は、モデルのモダリティに依存します。これらの入力は、AutoTokenizerやAutoProcessorなどのモデルのプリプロセッサクラスによって返されます。モデルのプリプロセッサが複数の種類の入力を生成する場合は、すべての入力をgenerate()に渡します。各モデルのプリプロセッサについての詳細は、対応するモデルのドキュメンテーションで確認できます。 テキストを生成するためのトークンの選択プロセスはデコーディングとして知られ、`generate()`メソッドが使用するデコーディング戦略をカスタマイズできます。デコーディング戦略を変更することは、訓練可能なパラメータの値を変更しませんが、生成されるテキストの品質に顕著な影響を与えることがあります。これにより、テキスト内の繰り返しを減少させ、より一貫性のあるテキストを生成するのに役立ちます。 このガイドでは以下の内容が説明されています: * デフォルトのテキスト生成設定 * 一般的なデコーディング戦略とその主要なパラメータ * 🤗 Hubのあなたのファインチューンモデルとカスタム生成設定の保存と共有 ## Default text generation configuration モデルのデコーディング戦略は、その生成設定で定義されています。[`pipeline`] 内で推論に事前訓練モデルを使用する際には、モデルはデフォルトの生成設定を内部で適用する `PreTrainedModel.generate()` メソッドを呼び出します。デフォルトの設定は、モデルにカスタム設定が保存されていない場合にも使用されます。 モデルを明示的に読み込む場合、それに付属する生成設定を `model.generation_config` を介して確認できます。 ```python >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") >>> model.generation_config GenerationConfig { "bos_token_id": 50256, "eos_token_id": 50256, } ``` `model.generation_config` を出力すると、デフォルトの生成設定から異なる値のみが表示され、デフォルトの値はリストされません。 デフォルトの生成設定では、出力のサイズは入力プロンプトとの組み合わせで最大20トークンに制限されており、リソース制限に達しないようにしています。デフォルトのデコーディング戦略は貪欲探索で、最も確率の高いトークンを次のトークンとして選択する最も単純なデコーディング戦略です。多くのタスクや小さな出力サイズの場合、これはうまく機能します。ただし、長い出力を生成するために使用される場合、貪欲探索は高度に繰り返される結果を生成し始めることがあります。 ## Customize text generation `generate` メソッドに直接パラメータとその値を渡すことで、`generation_config` を上書きできます。 ```python >>> my_model.generate(**inputs, num_beams=4, do_sample=True) # doctest: +SKIP ``` デフォルトのデコーディング戦略がほとんどのタスクでうまく機能する場合でも、いくつかの設定を微調整できます。一般的に調整されるパラメータには次のものがあります: - `max_new_tokens`: 生成するトークンの最大数。つまり、出力シーケンスのサイズであり、プロンプト内のトークンは含まれません。 - `num_beams`: 1よりも大きなビーム数を指定することで、貪欲検索からビームサーチに切り替えることができます。この戦略では、各時間ステップでいくつかの仮説を評価し、最終的に全体のシーケンスに対する最も高い確率を持つ仮説を選択します。これにより、初期の確率が低いトークンで始まる高確率のシーケンスが貪欲検索によって無視されることがなくなります。 - `do_sample`: このパラメータを`True`に設定すると、多項分布サンプリング、ビームサーチ多項分布サンプリング、Top-Kサンプリング、Top-pサンプリングなどのデコーディング戦略が有効になります。これらの戦略は、各戦略固有の調整を含む単語彙全体の確率分布から次のトークンを選択します。 - `num_return_sequences`: 各入力に対して返すシーケンス候補の数。これは、複数のシーケンス候補をサポートするデコーディング戦略(ビームサーチやサンプリングのバリエーションなど)にのみ適用されます。貪欲検索や対照的な検索など、単一の出力シーケンスを返すデコーディング戦略では使用できません。 ## Save a custom decoding strategy with your model 特定の生成構成で調整したモデルを共有したい場合、以下の手順を実行できます: * [`GenerationConfig`] クラスのインスタンスを作成する * 
デコーディング戦略のパラメータを指定する * [`GenerationConfig.save_pretrained`] を使用して生成構成を保存し、`config_file_name` 引数を空にすることを忘れないでください * `push_to_hub` を `True` に設定して、構成をモデルのリポジトリにアップロードします ```python >>> from transformers import AutoModelForCausalLM, GenerationConfig >>> model = AutoModelForCausalLM.from_pretrained("my_account/my_model") # doctest: +SKIP >>> generation_config = GenerationConfig( ... max_new_tokens=50, do_sample=True, top_k=50, eos_token_id=model.config.eos_token_id ... ) >>> generation_config.save_pretrained("my_account/my_model", push_to_hub=True) # doctest: +SKIP ``` 1つのディレクトリに複数の生成設定を保存することもでき、[`GenerationConfig.save_pretrained`] の `config_file_name` 引数を使用します。後で [`GenerationConfig.from_pretrained`] でこれらをインスタンス化できます。これは、1つのモデルに対して複数の生成設定を保存したい場合に便利です (例:サンプリングを使用したクリエイティブなテキスト生成用の1つと、ビームサーチを使用した要約用の1つ)。モデルに設定ファイルを追加するには、適切な Hub 権限が必要です。 ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, GenerationConfig >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-small") >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small") >>> translation_generation_config = GenerationConfig( ... num_beams=4, ... early_stopping=True, ... decoder_start_token_id=0, ... eos_token_id=model.config.eos_token_id, ... pad_token=model.config.pad_token_id, ... ) >>> # Tip: add `push_to_hub=True` to push to the Hub >>> translation_generation_config.save_pretrained("/tmp", "translation_generation_config.json") >>> # You could then use the named generation config file to parameterize generation >>> generation_config = GenerationConfig.from_pretrained("/tmp", "translation_generation_config.json") >>> inputs = tokenizer("translate English to French: Configuration files are easy to use!", return_tensors="pt") >>> outputs = model.generate(**inputs, generation_config=generation_config) >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['Les fichiers de configuration sont faciles à utiliser!'] ``` ## Streaming `generate()` は、その `streamer` 入力を介してストリーミングをサポートしています。`streamer` 入力は、次のメソッドを持つクラスのインスタンスと互換性があります:`put()` と `end()`。内部的には、`put()` は新しいトークンをプッシュするために使用され、`end()` はテキスト生成の終了をフラグ付けするために使用されます。 <Tip warning={true}> ストリーマークラスのAPIはまだ開発中であり、将来変更される可能性があります。 </Tip> 実際には、さまざまな目的に対して独自のストリーミングクラスを作成できます!また、使用できる基本的なストリーミングクラスも用意されています。例えば、[`TextStreamer`] クラスを使用して、`generate()` の出力を画面に単語ごとにストリームすることができます: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") >>> streamer = TextStreamer(tok) >>> # Despite returning the usual output, the streamer will also print the generated text to stdout. 
>>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20) An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven, ``` ## Decoding strategies 特定の `generate()` パラメータの組み合わせ、そして最終的に `generation_config` は、特定のデコーディング戦略を有効にするために使用できます。このコンセプトが新しい場合、[このブログポスト](https://huggingface.co/blog/how-to-generate)を読むことをお勧めします。このブログポストでは、一般的なデコーディング戦略がどのように動作するかが説明されています。 ここでは、デコーディング戦略を制御するいくつかのパラメータを示し、それらをどのように使用できるかを説明します。 ### Greedy Search [`generate`] はデフォルトで貪欲探索デコーディングを使用するため、有効にするためにパラメータを渡す必要はありません。これは、パラメータ `num_beams` が 1 に設定され、`do_sample=False` であることを意味します。 ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "I look forward to" >>> checkpoint = "distilbert/distilgpt2" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n'] ``` ### Contrastive search コントラスティブ検索デコーディング戦略は、2022年の論文[A Contrastive Framework for Neural Text Generation](https://arxiv.org/abs/2202.06417)で提案されました。 これは、非反復的でありながら一貫性のある長い出力を生成するために優れた結果を示しています。コントラスティブ検索の動作原理を学ぶには、[このブログポスト](https://huggingface.co/blog/introducing-csearch)をご覧ください。 コントラスティブ検索の動作を有効にし、制御する2つの主要なパラメータは「penalty_alpha」と「top_k」です: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> checkpoint = "openai-community/gpt2-large" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> prompt = "Hugging Face Company is" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> outputs = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=100) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Hugging Face Company is a family owned and operated business. We pride ourselves on being the best in the business and our customer service is second to none.\n\nIf you have any questions about our products or services, feel free to contact us at any time. 
We look forward to hearing from you!']
```

### Multinomial sampling

常に最高確率のトークンを次のトークンとして選択する貪欲検索とは異なり、多項分布サンプリング(または祖先サンプリングとも呼ばれます)はモデルによって提供される語彙全体の確率分布に基づいて次のトークンをランダムに選択します。ゼロ以外の確率を持つすべてのトークンには選択される可能性があり、これにより繰り返しのリスクが減少します。

多項分布サンプリングを有効にするには、`do_sample=True` および `num_beams=1` を設定します。

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
>>> set_seed(0)  # For reproducibility

>>> checkpoint = "openai-community/gpt2-large"
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> prompt = "Today was an amazing day because"
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> outputs = model.generate(**inputs, do_sample=True, num_beams=1, max_new_tokens=100)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Today was an amazing day because when you go to the World Cup and you don\'t, or when you don\'t get invited, that\'s a terrible feeling."']
```

### Beam-search decoding

貪欲探索とは異なり、ビームサーチデコーディングは各時間ステップでいくつかの仮説を保持し、最終的にシーケンス全体で最も確率が高い仮説を選択します。これにより、貪欲探索では無視されてしまう初期トークンの確率が低い高確率のシーケンスを特定する利点があります。

このデコーディング戦略を有効にするには、`num_beams`(追跡する仮説の数)を1よりも大きな値に指定します。

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "It is astonishing how one can"
>>> checkpoint = "openai-community/gpt2-medium"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, max_new_tokens=50)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['It is astonishing how one can have such a profound impact on the lives of so many people in such a short period of time."\n\nHe added: "I am very proud of the work I have been able to do in the last few years.\n\n"I have']
```

### Beam-search multinomial sampling

その名前からもわかるように、このデコーディング戦略はビームサーチと多項サンプリングを組み合わせています。このデコーディング戦略を使用するには、`num_beams` を1より大きな値に設定し、`do_sample=True` を設定する必要があります。

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, set_seed
>>> set_seed(0)  # For reproducibility

>>> prompt = "translate English to German: The house is wonderful."
>>> checkpoint = "google-t5/t5-small"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, do_sample=True)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'Das Haus ist wunderbar.'
```

### Diverse beam search decoding

多様なビームサーチデコーディング戦略は、ビームサーチ戦略の拡張であり、選択肢からより多様なビームシーケンスを生成できるようにします。この仕組みの詳細については、[Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf) をご参照ください。このアプローチには、`num_beams`、`num_beam_groups`、および `diversity_penalty` という3つの主要なパラメータがあります。多様性ペナルティは、出力がグループごとに異なることを保証し、ビームサーチは各グループ内で使用されます。

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> checkpoint = "google/pegasus-xsum"
>>> prompt = (
...     "The Permaculture Design Principles are a set of universal design principles "
...     "that can be applied to any location, climate and culture, and they allow us to design "
...     "the most efficient and sustainable human habitation and food production systems. "
...     "Permaculture is a design system that encompasses a wide variety of disciplines, such "
...     
"as ecology, landscape design, environmental science and energy conservation, and the " ... "Permaculture design principles are drawn from these various disciplines. Each individual " ... "design principle itself embodies a complete conceptual framework based on sound " ... "scientific principles. When we bring all these separate principles together, we can " ... "create a design system that both looks at whole systems, the parts that these systems " ... "consist of, and how those parts interact with each other to create a complex, dynamic, " ... "living system. Each design principle serves as a tool that allows us to integrate all " ... "the separate parts of a design, referred to as elements, into a functional, synergistic, " ... "whole system, where the elements harmoniously interact and work together in the most " ... "efficient way possible." ... ) >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint) >>> outputs = model.generate(**inputs, num_beams=5, num_beam_groups=5, max_new_tokens=30, diversity_penalty=1.0) >>> tokenizer.decode(outputs[0], skip_special_tokens=True) 'The Design Principles are a set of universal design principles that can be applied to any location, climate and culture, and they allow us to design the' ``` ### Assisted Decoding アシストデコーディングは、上記のデコーディング戦略を変更したもので、同じトークナイザー(理想的にははるかに小さなモデル)を使用して、いくつかの候補トークンを貪欲に生成するアシスタントモデルを使用します。その後、主要なモデルは候補トークンを1つの前向きパスで検証し、デコーディングプロセスを高速化します。現在、アシストデコーディングでは貪欲検索とサンプリングのみがサポートされており、バッチ入力はサポートされていません。アシストデコーディングの詳細については、[このブログ記事](https://huggingface.co/blog/assisted-generation) をご覧ください。 アシストデコーディングを有効にするには、`assistant_model` 引数をモデルで設定します。 このガイドは、さまざまなデコーディング戦略を可能にする主要なパラメーターを説明しています。さらに高度なパラメーターは [`generate`] メソッドに存在し、[`generate`] メソッドの動作をさらに制御できます。使用可能なパラメーターの完全なリストについては、[APIドキュメント](./main_classes/text_generation.md) を参照してください。 ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> prompt = "Alice and Bob" >>> checkpoint = "EleutherAI/pythia-1.4b-deduped" >>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint) >>> outputs = model.generate(**inputs, assistant_model=assistant_model) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a'] ``` サンプリング方法を使用する場合、アシストデコーディングでは `temperature` 引数を使用して、多項サンプリングと同様にランダム性を制御できます。ただし、アシストデコーディングでは、温度を低くすることで遅延の改善に役立ちます。 ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, set_seed >>> set_seed(42) # For reproducibility >>> prompt = "Alice and Bob" >>> checkpoint = "EleutherAI/pythia-1.4b-deduped" >>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped" >>> tokenizer = AutoTokenizer.from_pretrained(checkpoint) >>> inputs = tokenizer(prompt, return_tensors="pt") >>> model = AutoModelForCausalLM.from_pretrained(checkpoint) >>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint) >>> outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.5) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Alice and Bob are going to the same party. It is a small party, in a small'] ```
transformers/docs/source/ja/generation_strategies.md/0
{ "file_path": "transformers/docs/source/ja/generation_strategies.md", "repo_id": "transformers", "token_count": 8973 }
470
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # コールバック数 コールバックは、PyTorch のトレーニング ループの動作をカスタマイズできるオブジェクトです。 トレーニング ループを検査できる [`Trainer`] (この機能は TensorFlow にはまだ実装されていません) 状態を確認し (進捗レポート、TensorBoard または他の ML プラットフォームへのログ記録など)、決定を下します (初期段階など)。 停止中)。 コールバックは、返される [`TrainerControl`] オブジェクトを除けば、「読み取り専用」のコード部分です。 トレーニング ループ内では何も変更できません。トレーニング ループの変更が必要なカスタマイズの場合は、次のことを行う必要があります。 [`Trainer`] をサブクラス化し、必要なメソッドをオーバーライドします (例については、[trainer](trainer) を参照してください)。 デフォルトでは、`TrainingArguments.report_to` は `"all"` に設定されているため、[`Trainer`] は次のコールバックを使用します。 - [`DefaultFlowCallback`] は、ログ記録、保存、評価のデフォルトの動作を処理します。 - [`PrinterCallback`] または [`ProgressCallback`] で進行状況を表示し、 ログ (最初のログは、[`TrainingArguments`] を通じて tqdm を非アクティブ化する場合に使用され、そうでない場合に使用されます) 2番目です)。 - [`~integrations.TensorBoardCallback`] (PyTorch >= 1.4 を介して) tensorboard にアクセスできる場合 またはテンソルボードX)。 - [`~integrations.WandbCallback`] [wandb](https://www.wandb.com/) がインストールされている場合。 - [`~integrations.CometCallback`] [comet_ml](https://www.comet.ml/site/) がインストールされている場合。 - [mlflow](https://www.mlflow.org/) がインストールされている場合は [`~integrations.MLflowCallback`]。 - [`~integrations.NeptuneCallback`] [neptune](https://neptune.ai/) がインストールされている場合。 - [`~integrations.AzureMLCallback`] [azureml-sdk](https://pypi.org/project/azureml-sdk/) の場合 インストールされています。 - [`~integrations.CodeCarbonCallback`] [codecarbon](https://pypi.org/project/codecarbon/) の場合 インストールされています。 - [`~integrations.ClearMLCallback`] [clearml](https://github.com/allegroai/clearml) がインストールされている場合。 - [`~integrations.DagsHubCallback`] [dagshub](https://dagshub.com/) がインストールされている場合。 - [`~integrations.FlyteCallback`] [flyte](https://flyte.org/) がインストールされている場合。 - [`~integrations.DVCLiveCallback`] [dvclive](https://www.dvc.org/doc/dvclive) がインストールされている場合。 パッケージがインストールされているが、付随する統合を使用したくない場合は、`TrainingArguments.report_to` を、使用したい統合のみのリストに変更できます (例: `["azure_ml", "wandb"]`) 。 コールバックを実装するメインクラスは [`TrainerCallback`] です。それは、 [`TrainingArguments`] は [`Trainer`] をインスタンス化するために使用され、それにアクセスできます。 [`TrainerState`] を介してトレーナーの内部状態を取得し、トレーニング ループ上でいくつかのアクションを実行できます。 [`TrainerControl`]。 ## 利用可能なコールバック ライブラリで利用可能な [`TrainerCallback`] のリストは次のとおりです。 [[autodoc]] integrations.CometCallback - setup [[autodoc]] DefaultFlowCallback [[autodoc]] PrinterCallback [[autodoc]] ProgressCallback [[autodoc]] EarlyStoppingCallback [[autodoc]] integrations.TensorBoardCallback [[autodoc]] integrations.WandbCallback - setup [[autodoc]] integrations.MLflowCallback - setup [[autodoc]] integrations.AzureMLCallback [[autodoc]] integrations.CodeCarbonCallback [[autodoc]] integrations.NeptuneCallback [[autodoc]] integrations.ClearMLCallback [[autodoc]] integrations.DagsHubCallback [[autodoc]] integrations.FlyteCallback [[autodoc]] integrations.DVCLiveCallback - setup ## TrainerCallback [[autodoc]] TrainerCallback 以下は、カスタム コールバックを PyTorch [`Trainer`] に登録する方法の例です。 ```python class 
MyCallback(TrainerCallback): "A callback that prints a message at the beginning of training" def on_train_begin(self, args, state, control, **kwargs): print("Starting training") trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=[MyCallback], # We can either pass the callback class this way or an instance of it (MyCallback()) ) ``` コールバックを登録する別の方法は、次のように `trainer.add_callback()` を呼び出すことです。 ```python trainer = Trainer(...) trainer.add_callback(MyCallback) # Alternatively, we can pass an instance of the callback class trainer.add_callback(MyCallback()) ``` ## TrainerState [[autodoc]] TrainerState ## TrainerControl [[autodoc]] TrainerControl
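以下は、[`TrainerState`] と [`TrainerControl`] の使い方を示す最小限のスケッチです(公式ドキュメントの例ではなく、説明のために追加した仮定的なコードです)。評価時のメトリクス(ここでは `eval_loss` を仮定)がしきい値を下回ったら `control.should_training_stop` を設定し、トレーニングを早期終了します。

```python
from transformers import TrainerCallback


class StopOnLowLossCallback(TrainerCallback):
    """eval_loss がしきい値を下回ったらトレーニングを停止する説明用のコールバック。"""

    def __init__(self, threshold: float = 0.5):
        self.threshold = threshold  # hypothetical threshold

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        # TrainerState からトレーニングループの状態 (global_step など) を読み取れます
        eval_loss = (metrics or {}).get("eval_loss")
        if eval_loss is not None and eval_loss < self.threshold:
            print(f"step {state.global_step}: eval_loss={eval_loss:.4f} < {self.threshold} so stopping")
            control.should_training_stop = True
        return control
```

登録は上記と同様に、`Trainer(..., callbacks=[StopOnLowLossCallback(0.5)])` または `trainer.add_callback(StopOnLowLossCallback(0.5))` で行えます。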
transformers/docs/source/ja/main_classes/callback.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/callback.md", "repo_id": "transformers", "token_count": 2295 }
471
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tokenizer トークナイザーは、モデルの入力の準備を担当します。ライブラリには、すべてのモデルのトークナイザーが含まれています。ほとんど トークナイザーの一部は、完全な Python 実装と、 Rust ライブラリ [🤗 Tokenizers](https://github.com/huggingface/tokenizers)。 「高速」実装では次のことが可能になります。 1. 特にバッチトークン化を行う場合の大幅なスピードアップと 2. 元の文字列 (文字と単語) とトークン空間の間でマッピングする追加のメソッド (例: 特定の文字を含むトークンのインデックス、または特定のトークンに対応する文字の範囲)。 基本クラス [`PreTrainedTokenizer`] および [`PreTrainedTokenizerFast`] モデル入力の文字列入力をエンコードし (以下を参照)、Python をインスタンス化/保存するための一般的なメソッドを実装します。 ローカル ファイルまたはディレクトリ、またはライブラリによって提供される事前トレーニング済みトークナイザーからの「高速」トークナイザー (HuggingFace の AWS S3 リポジトリからダウンロード)。二人とも頼りにしているのは、 共通メソッドを含む [`~tokenization_utils_base.PreTrainedTokenizerBase`] [`~tokenization_utils_base.SpecialTokensMixin`]。 したがって、[`PreTrainedTokenizer`] と [`PreTrainedTokenizerFast`] はメインを実装します。 すべてのトークナイザーを使用するためのメソッド: - トークン化 (文字列をサブワード トークン文字列に分割)、トークン文字列を ID に変換したり、その逆の変換を行ったりします。 エンコード/デコード (つまり、トークン化と整数への変換)。 - 基礎となる構造 (BPE、SentencePiece...) から独立した方法で、語彙に新しいトークンを追加します。 - 特別なトークン (マスク、文の始まりなど) の管理: トークンの追加、属性への割り当て。 トークナイザーにより、簡単にアクセスでき、トークン化中に分割されないようにすることができます。 [`BatchEncoding`] は、 [`~tokenization_utils_base.PreTrainedTokenizerBase`] のエンコード メソッド (`__call__`、 `encode_plus` および `batch_encode_plus`) であり、Python 辞書から派生しています。トークナイザーが純粋な Python の場合 tokenizer の場合、このクラスは標準の Python 辞書と同じように動作し、によって計算されたさまざまなモデル入力を保持します。 これらのメソッド (`input_ids`、`attention_mask`...)。トークナイザーが「高速」トークナイザーである場合 (つまり、 HuggingFace [トークナイザー ライブラリ](https://github.com/huggingface/tokenizers))、このクラスはさらに提供します 元の文字列 (文字と単語) と トークンスペース (例: 指定された文字または対応する文字の範囲を構成するトークンのインデックスの取得) 与えられたトークンに)。 ## PreTrainedTokenizer [[autodoc]] PreTrainedTokenizer - __call__ - apply_chat_template - batch_decode - decode - encode - push_to_hub - all ## PreTrainedTokenizerFast [`PreTrainedTokenizerFast`] は [tokenizers](https://huggingface.co/docs/tokenizers) ライブラリに依存します。 🤗 トークナイザー ライブラリから取得したトークナイザーは、 🤗 トランスに非常に簡単にロードされます。これがどのように行われるかを理解するには、[🤗 tokenizers からの tokenizers を使用する](../fast_tokenizers) ページを参照してください。 [[autodoc]] PreTrainedTokenizerFast - __call__ - apply_chat_template - batch_decode - decode - encode - push_to_hub - all ## BatchEncoding [[autodoc]] BatchEncoding
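以下は、上記のクラスの典型的な使い方を示す簡単なスケッチです(ドキュメント本体の例ではなく、説明のために追加した仮定的なコードです。チェックポイント名は一例です)。「高速」トークナイザーでは、返される [`BatchEncoding`] を通じて文字とトークンの間のマッピングにアクセスできます。

```python
from transformers import AutoTokenizer

# Any checkpoint with a fast tokenizer works; this one is just an example
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

encoding = tokenizer("Hello, how are you?")  # returns a BatchEncoding

print(encoding["input_ids"])       # token ids
print(encoding.tokens())           # subword token strings (fast tokenizers only)
print(encoding.word_ids())         # word index for each token (fast tokenizers only)
print(encoding.char_to_token(7))   # token index corresponding to character 7 of the input
print(encoding.token_to_chars(2))  # character span corresponding to token 2

# Convert ids back to text
print(tokenizer.decode(encoding["input_ids"], skip_special_tokens=True))
```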
transformers/docs/source/ja/main_classes/tokenizer.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/tokenizer.md", "repo_id": "transformers", "token_count": 1908 }
472
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BERTweet ## Overview BERTweet モデルは、Dat Quoc Nguyen、Thanh Vu によって [BERTweet: A pre-trained language model for English Tweets](https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf) で提案されました。アン・トゥアン・グエンさん。 論文の要約は次のとおりです。 *私たちは、英語ツイート用に初めて公開された大規模な事前トレーニング済み言語モデルである BERTweet を紹介します。私たちのBERTweetは、 BERT ベースと同じアーキテクチャ (Devlin et al., 2019) は、RoBERTa 事前トレーニング手順 (Liu et al.) を使用してトレーニングされます。 al.、2019)。実験では、BERTweet が強力なベースラインである RoBERTa ベースおよび XLM-R ベースを上回るパフォーマンスを示すことが示されています (Conneau et al., 2020)、3 つのツイート NLP タスクにおいて、以前の最先端モデルよりも優れたパフォーマンス結果が得られました。 品詞タグ付け、固有表現認識およびテキスト分類。* ## Usage example ```python >>> import torch >>> from transformers import AutoModel, AutoTokenizer >>> bertweet = AutoModel.from_pretrained("vinai/bertweet-base") >>> # For transformers v4.x+: >>> tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False) >>> # For transformers v3.x: >>> # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base") >>> # INPUT TWEET IS ALREADY NORMALIZED! >>> line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:" >>> input_ids = torch.tensor([tokenizer.encode(line)]) >>> with torch.no_grad(): ... features = bertweet(input_ids) # Models outputs are now tuples >>> # With TensorFlow 2.0+: >>> # from transformers import TFAutoModel >>> # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base") ``` <Tip> この実装は、トークン化方法を除いて BERT と同じです。詳細については、[BERT ドキュメント](bert) を参照してください。 API リファレンス情報。 </Tip> このモデルは [dqnguyen](https://huggingface.co/dqnguyen) によって提供されました。元のコードは [ここ](https://github.com/VinAIResearch/BERTweet) にあります。 ## BertweetTokenizer [[autodoc]] BertweetTokenizer
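補足として、正規化されていない生のツイートを入力する場合のスケッチを示します。`BertweetTokenizer` の `normalization` 引数に `True` を渡すと、ユーザー名や URL の置換などのツイート正規化をトークナイザー側で行えることを仮定した、説明用の例です(`emoji` パッケージが必要になる場合があります)。

```python
from transformers import AutoTokenizer

# With normalization=True, raw tweets can be passed directly (assumption for illustration)
tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False, normalization=True)

raw_tweet = "SC has first two presumptive cases of coronavirus, DHEC confirms https://example.com/news via @someuser 😢"
input_ids = tokenizer(raw_tweet, return_tensors="pt").input_ids
print(tokenizer.decode(input_ids[0]))
```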
transformers/docs/source/ja/model_doc/bertweet.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bertweet.md", "repo_id": "transformers", "token_count": 1155 }
473
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Chinese-CLIP ## Overview Chinese-CLIP An Yang, Junshu Pan, Junyang Lin, Rui Men, Yichang Zhang, Jingren Zhou, Chang Zhou [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) で提案されました。周、張周。 Chinese-CLIP は、中国語の画像とテキストのペアの大規模なデータセットに対する CLIP (Radford et al., 2021) の実装です。クロスモーダル検索を実行できるほか、ゼロショット画像分類、オープンドメインオブジェクト検出などのビジョンタスクのビジョンバックボーンとしても機能します。オリジナルの中国語-CLIPコードは[このリンクで](https://github.com/OFA-Sys/Chinese-CLIP)。 論文の要約は次のとおりです。 *CLIP の大成功 (Radford et al., 2021) により、視覚言語の事前訓練のための対照学習の研究と応用が促進されました。この研究では、ほとんどのデータが公開されているデータセットから取得された中国語の画像とテキストのペアの大規模なデータセットを構築し、新しいデータセットで中国語の CLIP モデルを事前トレーニングします。当社では、7,700 万から 9 億 5,800 万のパラメータにわたる、複数のサイズの 5 つの中国 CLIP モデルを開発しています。さらに、モデルのパフォーマンスを向上させるために、最初に画像エンコーダーをフリーズさせてモデルをトレーニングし、次にすべてのパラメーターを最適化してトレーニングする 2 段階の事前トレーニング方法を提案します。私たちの包括的な実験では、中国の CLIP がゼロショット学習と微調整のセットアップで MUGE、Flickr30K-CN、および COCO-CN 上で最先端のパフォーマンスを達成でき、ゼロで競争力のあるパフォーマンスを達成できることを実証しています。 - ELEVATER ベンチマークでの評価に基づくショット画像の分類 (Li et al., 2022)。コード、事前トレーニング済みモデル、デモがリリースされました。* Chinese-CLIP モデルは、[OFA-Sys](https://huggingface.co/OFA-Sys) によって提供されました。 ## Usage example 以下のコード スニペットは、画像とテキストの特徴と類似性を計算する方法を示しています。 ```python >>> from PIL import Image >>> import requests >>> from transformers import ChineseCLIPProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> # Squirtle, Bulbasaur, Charmander, Pikachu in English >>> texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"] >>> # compute image feature >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) # normalize >>> # compute text features >>> inputs = processor(text=texts, padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) # normalize >>> # compute image-text similarity scores >>> inputs = processor(text=texts, images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # probs: [[1.2686e-03, 5.4499e-02, 6.7968e-04, 9.4355e-01]] ``` 現在、次のスケールの事前トレーニング済み Chinese-CLIP モデルが 🤗 Hub で利用可能です。 - [OFA-Sys/chinese-clip-vit-base-patch16](https://huggingface.co/OFA-Sys/chinese-clip-vit-base-patch16) - 
[OFA-Sys/chinese-clip-vit-large-patch14](https://huggingface.co/OFA-Sys/chinese-clip-vit-large-patch14) - [OFA-Sys/chinese-clip-vit-large-patch14-336px](https://huggingface.co/OFA-Sys/chinese-clip-vit-large-patch14-336px) - [OFA-Sys/chinese-clip-vit-huge-patch14](https://huggingface.co/OFA-Sys/chinese-clip-vit-huge-patch14) ## ChineseCLIPConfig [[autodoc]] ChineseCLIPConfig - from_text_vision_configs ## ChineseCLIPTextConfig [[autodoc]] ChineseCLIPTextConfig ## ChineseCLIPVisionConfig [[autodoc]] ChineseCLIPVisionConfig ## ChineseCLIPImageProcessor [[autodoc]] ChineseCLIPImageProcessor - preprocess ## ChineseCLIPFeatureExtractor [[autodoc]] ChineseCLIPFeatureExtractor ## ChineseCLIPProcessor [[autodoc]] ChineseCLIPProcessor ## ChineseCLIPModel [[autodoc]] ChineseCLIPModel - forward - get_text_features - get_image_features ## ChineseCLIPTextModel [[autodoc]] ChineseCLIPTextModel - forward ## ChineseCLIPVisionModel [[autodoc]] ChineseCLIPVisionModel - forward
transformers/docs/source/ja/model_doc/chinese_clip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/chinese_clip.md", "repo_id": "transformers", "token_count": 2245 }
474
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeBERTa-v2 ## Overview DeBERTa モデルは、Pengcheng He、Xiaodong Liu、Jianfeng Gao、Weizhu Chen によって [DeBERTa: Decoding-enhanced BERT with Disentangled Attendant](https://arxiv.org/abs/2006.03654) で提案されました。Google のモデルに基づいています。 2018年にリリースされたBERTモデルと2019年にリリースされたFacebookのRoBERTaモデル。 これは、もつれた注意を解きほぐし、使用されるデータの半分を使用して強化されたマスク デコーダ トレーニングを備えた RoBERTa に基づいて構築されています。 ロベルタ。 論文の要約は次のとおりです。 *事前トレーニングされたニューラル言語モデルの最近の進歩により、多くの自然言語モデルのパフォーマンスが大幅に向上しました。 言語処理 (NLP) タスク。この論文では、新しいモデル アーキテクチャ DeBERTa (Decoding-enhanced BERT with これは、2 つの新しい技術を使用して BERT モデルと RoBERTa モデルを改善します。 1つ目は、 もつれを解く注意メカニズム。各単語は、その内容をエンコードする 2 つのベクトルを使用して表現され、 単語間の注意の重みは、それらの単語のもつれ解除行列を使用して計算されます。 内容と相対的な位置。 2 番目に、強化されたマスク デコーダを使用して、出力ソフトマックス レイヤを次のように置き換えます。 モデルの事前トレーニング用にマスクされたトークンを予測します。これら 2 つの手法により効率が大幅に向上することを示します。 モデルの事前トレーニングと下流タスクのパフォーマンスの向上。 RoBERTa-Large と比較すると、DeBERTa モデルは半分のレベルでトレーニングされています。 トレーニング データは幅広い NLP タスクで一貫して優れたパフォーマンスを示し、MNLI で +0.9% の改善を達成しました。 (90.2% 対 91.1%)、SQuAD v2.0 では +2.3% (88.4% 対 90.7%)、RACE では +3.6% (83.2% 対 86.8%) でした。 DeBERTa コードと 事前トレーニングされたモデルは https://github.com/microsoft/DeBERTa で公開されます。* 次の情報は、[元の実装で直接表示されます リポジトリ](https://github.com/microsoft/DeBERTa)。 DeBERTa v2 は、DeBERTa モデルの 2 番目のバージョンです。それには以下が含まれます SuperGLUE 単一モデルの提出に使用された 1.5B モデルは、人間のベースライン 89.8 に対して 89.9 を達成しました。あなたはできる この投稿に関する詳細については、著者のドキュメントを参照してください。 [ブログ](https://www.microsoft.com/en-us/research/blog/microsoft-deberta-surpasses-human-performance-on-the-superglue-benchmark/) v2 の新機能: - **語彙** v2 では、トレーニング データから構築されたサイズ 128K の新しい語彙を使用するようにトークナイザーが変更されました。 GPT2 ベースのトークナイザーの代わりに、トークナイザーは [sentencepiece ベース](https://github.com/google/sentencepiece) トークナイザー。 - **nGiE(nGram Induced Input Encoding)** DeBERTa-v2 モデルは、最初の畳み込み層とは別に追加の畳み込み層を使用します。 トランスフォーマー層を使用して、入力トークンのローカル依存関係をよりよく学習します。 - **位置射影行列を注目レイヤーのコンテンツ射影行列と共有** 以前に基づく 実験では、パフォーマンスに影響を与えることなくパラメータを保存できます。 - **バケットを適用して相対位置をエンコードします** DeBERTa-v2 モデルはログ バケットを使用して相対位置をエンコードします T5に似ています。 - **900M モデル & 1.5B モデル** 2 つの追加モデル サイズ: 900M と 1.5B が利用可能で、これにより、パフォーマンスが大幅に向上します。 下流タスクのパフォーマンス。 このモデルは [DeBERTa](https://huggingface.co/DeBERTa) によって寄稿されました。このモデルの TF 2.0 実装は、 [kamalkraj](https://huggingface.co/kamalkraj) による投稿。元のコードは [こちら](https://github.com/microsoft/DeBERTa) にあります。 ## Resources - [テキスト分類タスクガイド](../tasks/sequence_classification) - [トークン分類タスクガイド](../tasks/token_classification) - [質問回答タスク ガイド](../tasks/question_answering) - [マスク言語モデリング タスク ガイド](../tasks/masked_language_modeling) - [多肢選択タスク ガイド](../tasks/multiple_choice) ## DebertaV2Config [[autodoc]] DebertaV2Config ## DebertaV2Tokenizer [[autodoc]] DebertaV2Tokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## DebertaV2TokenizerFast [[autodoc]] DebertaV2TokenizerFast - build_inputs_with_special_tokens - 
create_token_type_ids_from_sequences <frameworkcontent> <pt> ## DebertaV2Model [[autodoc]] DebertaV2Model - forward ## DebertaV2PreTrainedModel [[autodoc]] DebertaV2PreTrainedModel - forward ## DebertaV2ForMaskedLM [[autodoc]] DebertaV2ForMaskedLM - forward ## DebertaV2ForSequenceClassification [[autodoc]] DebertaV2ForSequenceClassification - forward ## DebertaV2ForTokenClassification [[autodoc]] DebertaV2ForTokenClassification - forward ## DebertaV2ForQuestionAnswering [[autodoc]] DebertaV2ForQuestionAnswering - forward ## DebertaV2ForMultipleChoice [[autodoc]] DebertaV2ForMultipleChoice - forward </pt> <tf> ## TFDebertaV2Model [[autodoc]] TFDebertaV2Model - call ## TFDebertaV2PreTrainedModel [[autodoc]] TFDebertaV2PreTrainedModel - call ## TFDebertaV2ForMaskedLM [[autodoc]] TFDebertaV2ForMaskedLM - call ## TFDebertaV2ForSequenceClassification [[autodoc]] TFDebertaV2ForSequenceClassification - call ## TFDebertaV2ForTokenClassification [[autodoc]] TFDebertaV2ForTokenClassification - call ## TFDebertaV2ForQuestionAnswering [[autodoc]] TFDebertaV2ForQuestionAnswering - call ## TFDebertaV2ForMultipleChoice [[autodoc]] TFDebertaV2ForMultipleChoice - call </tf> </frameworkcontent>
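以下は、事前トレーニング済みチェックポイントから特徴量を取得する最小限の使用例のスケッチです(ドキュメント本体の例ではなく、説明のために追加したものです。チェックポイント名は一例です)。

```python
import torch
from transformers import AutoTokenizer, DebertaV2Model

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

inputs = tokenizer("DeBERTa-v2 uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# last hidden state of shape (batch_size, sequence_length, hidden_size)
print(outputs.last_hidden_state.shape)
```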
transformers/docs/source/ja/model_doc/deberta-v2.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/deberta-v2.md", "repo_id": "transformers", "token_count": 3023 }
475
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Custom hardware for training モデルのトレーニングおよび推論に使用するハードウェアは、パフォーマンスに大きな影響を与えることがあります。GPUについて詳しく知りたい場合は、Tim Dettmerの優れた[ブログ記事](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/)をチェックしてみてください。 GPUセットアップの実用的なアドバイスをいくつか見てみましょう。 ## GPU より大きなモデルをトレーニングする場合、基本的には以下の3つのオプションがあります: - より大きなGPU - より多くのGPU - より多くのCPUおよびNVMe([DeepSpeed-Infinity](main_classes/deepspeed#nvme-support)によるオフロード) まず、単一のGPUを使用する場合から始めましょう。 ### Power and Cooling 高価なハイエンドGPUを購入した場合、正しい電力供給と十分な冷却を提供することが重要です。 **電力**: 一部の高級コンシューマGPUカードには、2つまたは3つのPCI-E 8ピン電源ソケットがあります。カードにあるソケットの数だけ、独立した12V PCI-E 8ピンケーブルが接続されていることを確認してください。同じケーブルの一端にある2つの分岐(またはピッグテールケーブルとしても知られています)を使用しないでください。つまり、GPUに2つのソケットがある場合、PSUからカードに向けて2つのPCI-E 8ピンケーブルを使用し、1つのケーブルの端に2つのPCI-E 8ピンコネクタがあるものは使用しないでください!そうしないと、カードからのパフォーマンスを十分に引き出すことができません。 各PCI-E 8ピン電源ケーブルは、PSU側の12Vレールに接続する必要があり、最大で150Wの電力を供給できます。 一部のカードはPCI-E 12ピンコネクタを使用することがあり、これらは最大で500-600Wの電力を供給できます。 低価格帯のカードは6ピンコネクタを使用することがあり、最大で75Wの電力を供給します。 さらに、カードが必要とする安定した電圧を提供する高品質な電源ユニット(PSU)を使用する必要があります。 もちろん、PSUにはカードを駆動するために十分な未使用の電力が必要です。 **冷却**: GPUが過熱すると、スロットリングが開始され、フルパフォーマンスを提供しなくなり、過熱しすぎるとシャットダウンすることさえあります。 GPUが重要な負荷の下でどのような温度を目指すべきかを正確に示すことは難しいですが、おそらく+80℃未満であれば良いでしょうが、それより低い方が良いです - おそらく70-75℃が優れた範囲でしょう。スロットリングの開始温度はおそらく84-90℃のあたりからでしょう。スロットリングによるパフォーマンスの低下以外にも、長時間にわたる非常に高い温度はGPUの寿命を短縮する可能性があります。 次に、複数のGPUを持つ際に最も重要な側面の一つである接続について詳しく見てみましょう。 ### Multi-GPU Connectivity 複数のGPUを使用する場合、カードの相互接続方法はトータルのトレーニング時間に大きな影響を与える可能性があります。GPUが同じ物理ノードにある場合、次のように実行できます: ```bash nvidia-smi topo -m ``` もちろん、GPUがどのように相互接続されているかについて説明します。デュアルGPUを搭載し、NVLinkで接続されているマシンでは、おそらく以下のような情報が表示されるでしょう: ``` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X NV2 0-23 N/A GPU1 NV2 X 0-23 N/A ``` 別のNVLinkなしのマシンでは、以下のような状況が発生するかもしれません: ``` GPU0 GPU1 CPU Affinity NUMA Affinity GPU0 X PHB 0-11 N/A GPU1 PHB X 0-11 N/A ``` こちらが伝説です: ``` X = Self SYS = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI) NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node PHB = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU) PXB = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge) PIX = Connection traversing at most a single PCIe bridge NV# = Connection traversing a bonded set of # NVLinks ``` 最初のレポートである `NV2` では、GPUは2つのNVLinkで接続されており、2番目のレポートである `PHB` では、典型的な消費者向けのPCIe+Bridgeセットアップが行われています。 あなたのセットアップでどの種類の接続性があるかを確認してください。これらの接続方法のいくつかはカード間の通信を速くすることができます(例:NVLink)、他のものは遅くすることができます(例:PHB)。 使用されるスケーラビリティソリューションの種類に応じて、接続速度は大きな影響を与えることも、小さな影響を与えることもあります。GPUがあまり頻繁に同期する必要がない場合、DDPのように、遅い接続の影響はそれほど重要ではありません。しかし、GPUが頻繁にメッセージを送信する必要がある場合、ZeRO-DPのように、高速の接続がより高速なトレーニングを実現するために非常に重要になります。 #### NVlink [NVLink](https://en.wikipedia.org/wiki/NVLink) 
は、Nvidiaによって開発された有線のシリアルマルチレーンの近距離通信リンクです。

各新世代では、より高速な帯域幅が提供されます。たとえば、[Nvidia Ampere GA102 GPU Architecture](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf) からの引用です。

> Third-Generation NVLink®
> GA102 GPUs utilize NVIDIA’s third-generation NVLink interface, which includes four x4 links,
> with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four
> links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth
> between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink.
> (Note that 3-Way and 4-Way SLI configurations are not supported.)

したがって、`nvidia-smi topo -m` の出力の `NVX` レポートで取得する `X` が高いほど良いです。世代はあなたのGPUアーキテクチャに依存します。

小さなサンプルのwikitextを使用したgpt2言語モデルのトレーニングの実行を比較しましょう。結果は次のとおりです:

| NVlink | Time |
| ------ | ---: |
| Y      | 101s |
| N      | 131s |

NVLinkを使用すると、トレーニングが約23%速く完了することがわかります。2番目のベンチマークでは、`NCCL_P2P_DISABLE=1`を使用して、GPUがNVLinkを使用しないように指示しています。

以下は、完全なベンチマークコードと出力です:

```bash
# DDP w/ NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69}

# DDP w/o NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69}
```

Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`)

Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`
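補足として、各 GPU の NVLink リンクの状態や対応機能は、次のようなコマンドでも確認できます(出力はハードウェアによって異なります。あくまで説明のための一例です)。

```bash
# 各GPUのNVLinkリンクの状態(世代・レーン速度)を表示
nvidia-smi nvlink --status

# 各リンクがサポートする機能(P2Pなど)を表示
nvidia-smi nvlink --capabilities
```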
transformers/docs/source/ja/perf_hardware.md/0
{ "file_path": "transformers/docs/source/ja/perf_hardware.md", "repo_id": "transformers", "token_count": 4141 }
476
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Causal language modeling [[open-in-colab]] 言語モデリングには、因果的モデリングとマスクされた言語モデリングの 2 つのタイプがあります。このガイドでは、因果関係のある言語モデリングについて説明します。 因果言語モデルはテキスト生成によく使用されます。これらのモデルは、次のようなクリエイティブなアプリケーションに使用できます。 独自のテキスト アドベンチャーを選択するか、Copilot や CodeParrot などのインテリジェントなコーディング アシスタントを選択します。 <Youtube id="Vpjb1lu0MDk"/> 因果言語モデリングは、一連のトークン内の次のトークンを予測します。モデルは、次のトークンにのみ対応できます。 左。これは、モデルが将来のトークンを認識できないことを意味します。 GPT-2 は因果的言語モデルの一例です。 このガイドでは、次の方法を説明します。 1. [ELI5](https:/) の [r/askscience](https://www.reddit.com/r/askscience/) サブセットで [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) を微調整します。 /huggingface.co/datasets/eli5) データセット。 2. 微調整したモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/text-generation) を確認することをお勧めします。u </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install transformers datasets evaluate ``` モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load ELI5 dataset まず、ELI5 データセットの r/askscience サブセットの小さいサブセットを 🤗 データセット ライブラリからロードします。 これにより、完全なデータセットのトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認する機会が得られます。 ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("eli5", split="train_asks[:5000]") ``` [`~datasets.Dataset.train_test_split`] メソッドを使用して、データセットの `train_asks` をトレイン セットとテスト セットに分割します。 ```py >>> eli5 = eli5.train_test_split(test_size=0.2) ``` 次に、例を見てみましょう。 ```py >>> eli5["train"][0] {'answers': {'a_id': ['c3d1aib', 'c3d4lya'], 'score': [6, 3], 'text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"]}, 'answers_urls': {'url': []}, 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls': {'url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg']}, 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls': {'url': []}} ``` これは多くのことのように見えるかもしれませんが、実際に関心があるのは`text`フィールドだけです。言語モデリングの優れている点 タスクでは、次の単語がラベル * であるため、ラベル (教師なしタスクとも呼ばれます) は必要ありません。 ## Preprocess <Youtube id="ma1TrR7gE7I"/> 次のステップは、`text`サブフィールドを処理するために DistilGPT2 トークナイザーをロードすることです。 ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") ``` 上の例からわかるように、`text`フィールドは実際には`answers`内にネストされています。つまり、次のことが必要になります。 [` flatten`](https://huggingface.co/docs/datasets/process.html#flatten) メソッドを使用して、ネストされた構造から `text` サブフィールドを抽出します。 ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'answers.a_id': ['c3d1aib', 'c3d4lya'], 'answers.score': [6, 3], 'answers.text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"], 'answers_urls.url': [], 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls.url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg'], 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls.url': []} ``` `answers`接頭辞で示されるように、各サブフィールドは個別の列になり、`text`フィールドはリストになりました。その代わり 各文を個別にトークン化する場合は、リストを文字列に変換して、それらをまとめてトークン化できるようにします。 以下は、各例の文字列のリストを結合し、結果をトークン化する最初の前処理関数です。 ```py >>> def preprocess_function(examples): ... return tokenizer([" ".join(x) for x in examples["answers.text"]]) ``` この前処理関数をデータセット全体に適用するには、🤗 Datasets [`~datasets.Dataset.map`] メソッドを使用します。 `map` 関数を高速化するには、`batched=True` を設定してデータセットの複数の要素を一度に処理し、`num_proc` でプロセスの数を増やします。不要な列を削除します。 ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... remove_columns=eli5["train"].column_names, ... ) ``` このデータセットにはトークン シーケンスが含まれていますが、その一部はモデルの最大入力長よりも長くなります。 2 番目の前処理関数を使用して、 - すべてのシーケンスを連結します - 連結されたシーケンスを`block_size`で定義された短いチャンクに分割します。これは、最大入力長より短く、GPU RAM に十分な長さである必要があります。 ```py >>> block_size = 128 >>> def group_texts(examples): ... # Concatenate all texts. ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can ... # customize this part to your needs. ... if total_length >= block_size: ... 
total_length = (total_length // block_size) * block_size ... # Split by chunks of block_size. ... result = { ... k: [t[i : i + block_size] for i in range(0, total_length, block_size)] ... for k, t in concatenated_examples.items() ... } ... result["labels"] = result["input_ids"].copy() ... return result ``` Apply the `group_texts` function over the entire dataset: ```py >>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4) ``` 次に、[`DataCollat​​orForLanguageModeling`] を使用してサンプルのバッチを作成します。 *動的にパディング*する方が効率的です。 データセット全体を最大長までパディングするのではなく、照合中にバッチ内の文を最長の長さにします。 <frameworkcontent> <pt> シーケンス終了トークンをパディング トークンとして使用し、`mlm=False` を設定します。これは、入力を 1 要素分右にシフトしたラベルとして使用します。 ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) ``` </pt> <tf> シーケンス終了トークンをパディング トークンとして使用し、`mlm=False` を設定します。これは、入力を 1 要素分右にシフトしたラベルとして使用します。 ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf") ``` </tf> </frameworkcontent> ## Train <frameworkcontent> <pt> <Tip> [`Trainer`] を使用したモデルの微調整に慣れていない場合は、[基本チュートリアル](../training#train-with-pytorch-trainer) を参照してください。 </Tip> これでモデルのトレーニングを開始する準備が整いました。 [`AutoModelForCausalLM`] を使用して DistilGPT2 をロードします。 ```py >>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` この時点で残っている手順は次の 3 つだけです。 1. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。唯一の必須パラメータは、モデルの保存場所を指定する `output_dir` です。 `push_to_hub=True`を設定して、このモデルをハブにプッシュします (モデルをアップロードするには、Hugging Face にサインインする必要があります)。 2. トレーニング引数をモデル、データセット、データ照合器とともに [`Trainer`] に渡します。 3. [`~Trainer.train`] を呼び出してモデルを微調整します。 ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_eli5_clm-model", ... eval_strategy="epoch", ... learning_rate=2e-5, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... ) >>> trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.evaluate`] メソッドを使用してモデルを評価し、その複雑さを取得します。 ```py >>> import math >>> eval_results = trainer.evaluate() >>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}") Perplexity: 49.61 ``` 次に、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> Keras を使用したモデルの微調整に慣れていない場合は、[基本チュートリアル](../training#train-a-tensorflow-model-with-keras) をご覧ください。 </Tip> TensorFlow でモデルを微調整するには、オプティマイザー関数、学習率スケジュール、およびいくつかのトレーニング ハイパーパラメーターをセットアップすることから始めます。 ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` 次に、[`TFAutoModelForCausalLM`] を使用して DistilGPT2 をロードできます。 ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] を使用して、データセットを `tf.data.Dataset` 形式に変換します。 ```py >>> tf_train_set = model.prepare_tf_dataset( ... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... 
) ``` [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) を使用してトレーニング用のモデルを設定します。 Transformers モデルにはすべてデフォルトのタスク関連の損失関数があるため、次の場合を除き、損失関数を指定する必要はないことに注意してください。 ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) # No loss argument! ``` これは、モデルとトークナイザーを [`~transformers.PushToHubCallback`] でプッシュする場所を指定することで実行できます。 ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> callback = PushToHubCallback( ... output_dir="my_awesome_eli5_clm-model", ... tokenizer=tokenizer, ... ) ``` ついに、モデルのトレーニングを開始する準備が整いました。トレーニングおよび検証データセット、エポック数、コールバックを指定して [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) を呼び出し、モデルを微調整します。 ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback]) ``` トレーニングが完了すると、モデルは自動的にハブにアップロードされ、誰でも使用できるようになります。 </tf> </frameworkcontent> <Tip> 因果言語モデリング用にモデルを微調整する方法のより詳細な例については、対応するドキュメントを参照してください。 [PyTorch ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) または [TensorFlow ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)。 </Tip> ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 テキストを生成するプロンプトを考え出します。 ```py >>> prompt = "Somatic hypermutation allows the immune system to" ``` 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。モデルを使用してテキスト生成用の`pipeline`をインスタンス化し、それにテキストを渡します。 ```py >>> from transformers import pipeline >>> generator = pipeline("text-generation", model="my_awesome_eli5_clm-model") >>> generator(prompt) [{'generated_text': "Somatic hypermutation allows the immune system to be able to effectively reverse the damage caused by an infection.\n\n\nThe damage caused by an infection is caused by the immune system's ability to perform its own self-correcting tasks."}] ``` <frameworkcontent> <pt> テキストをトークン化し、「input_ids」を PyTorch テンソルとして返します。 ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_eli5_clm-model") >>> inputs = tokenizer(prompt, return_tensors="pt").input_ids ``` [`~generation.GenerationMixin.generate`] メソッドを使用してテキストを生成します。 さまざまなテキスト生成戦略と生成を制御するためのパラメーターの詳細については、[テキスト生成戦略](../generation_strategies) ページを参照してください。 ```py >>> from transformers import AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained("my_awesome_eli5_clm-model") >>> outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) ``` 生成されたトークン ID をデコードしてテキストに戻します。 ```py >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ["Somatic hypermutation allows the immune system to react to drugs with the ability to adapt to a different environmental situation. In other words, a system of 'hypermutation' can help the immune system to adapt to a different environmental situation or in some cases even a single life. In contrast, researchers at the University of Massachusetts-Boston have found that 'hypermutation' is much stronger in mice than in humans but can be found in humans, and that it's not completely unknown to the immune system. 
A study on how the immune system"] ``` </pt> <tf> テキストをトークン化し、`input_ids`を TensorFlow テンソルとして返します。 ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_eli5_clm-model") >>> inputs = tokenizer(prompt, return_tensors="tf").input_ids ``` [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] メソッドを使用して要約を作成します。さまざまなテキスト生成戦略と生成を制御するためのパラメーターの詳細については、[テキスト生成戦略](../generation_strategies) ページを参照してください。 ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("my_awesome_eli5_clm-model") >>> outputs = model.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) ``` 生成されたトークン ID をデコードしてテキストに戻します。 ```py >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Somatic hypermutation allows the immune system to detect the presence of other viruses as they become more prevalent. Therefore, researchers have identified a high proportion of human viruses. The proportion of virus-associated viruses in our study increases with age. Therefore, we propose a simple algorithm to detect the presence of these new viruses in our samples as a sign of improved immunity. A first study based on this algorithm, which will be published in Science on Friday, aims to show that this finding could translate into the development of a better vaccine that is more effective for'] ``` </tf> </frameworkcontent>
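補足として、PyTorch 版の `data_collator` が実際にどのようなバッチを作るのかは、次のような簡単なスケッチで確認できます(上で作成した `lm_dataset` と PyTorch 版の `data_collator` を前提とした説明用の例であり、トレーニングに必須の手順ではありません)。`labels` は `input_ids` のコピーで、パディング位置だけが `-100` に置き換えられます。次トークン予測のための 1 トークン分のシフトは、モデル内部の損失計算で行われます。

```py
>>> samples = [lm_dataset["train"][i] for i in range(2)]
>>> batch = data_collator(samples)
>>> batch["input_ids"].shape, batch["labels"].shape
(torch.Size([2, 128]), torch.Size([2, 128]))
>>> # `group_texts` ですべてのチャンクを block_size=128 に揃えているため、ここではパディングは発生しません。
```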
transformers/docs/source/ja/tasks/language_modeling.md/0
{ "file_path": "transformers/docs/source/ja/tasks/language_modeling.md", "repo_id": "transformers", "token_count": 7769 }
477
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Zero-shot object detection [[open-in-colab]] 従来、[オブジェクト検出](object_detection) に使用されるモデルには、トレーニング用のラベル付き画像データセットが必要でした。 トレーニング データからのクラスのセットの検出に限定されます。 ゼロショットオブジェクト検出は、別のアプローチを使用する [OWL-ViT](../model_doc/owlvit) モデルによってサポートされています。 OWL-ViT オープン語彙オブジェクト検出器です。これは、フリーテキストクエリに基づいて画像内のオブジェクトを検出できることを意味します。 ラベル付きデータセットでモデルを微調整する必要性。 OWL-ViTは、マルチモーダル表現を利用してオープン語彙の検出を実行します。 [CLIP](../model_doc/clip) とを組み合わせます。 軽量のオブジェクト分類および位置特定ヘッド。オープン語彙の検出は、CLIP のテキスト エンコーダーを使用してフリーテキスト クエリを埋め込み、それらをオブジェクト分類およびローカリゼーション ヘッドへの入力として使用することによって実現されます。 画像とそれに対応するテキストの説明を関連付け、ViT は画像パッチを入力として処理します。作家たち のOWL-ViTは、まずCLIPをゼロからトレーニングし、次に標準の物体検出データセットを使用してOWL-ViTをエンドツーエンドで微調整しました。 二部マッチング損失。 このアプローチを使用すると、モデルはラベル付きデータセットで事前にトレーニングしなくても、テキストの説明に基づいてオブジェクトを検出できます。 このガイドでは、OWL-ViT の使用方法を学習します。 - テキストプロンプトに基づいてオブジェクトを検出します - バッチオブジェクト検出用 - 画像誘導物体検出用 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install -q transformers ``` ## Zero-shot object detection pipeline OWL-ViTによる推論を試す最も簡単な方法は、OWL-ViTを[`pipeline`]で使用することです。パイプラインをインスタンス化する [Hugging Face Hub のチェックポイント](https://huggingface.co/models?other=owlvit) からのゼロショット オブジェクト検出の場合: ```python >>> from transformers import pipeline >>> checkpoint = "google/owlvit-base-patch32" >>> detector = pipeline(model=checkpoint, task="zero-shot-object-detection") ``` 次に、物体を検出したい画像を選択します。ここでは、宇宙飛行士アイリーン・コリンズの画像を使用します。 [NASA](https://www.nasa.gov/multimedia/imagegallery/index.html) Great Images データセットの一部。 ```py >>> import skimage >>> import numpy as np >>> from PIL import Image >>> image = skimage.data.astronaut() >>> image = Image.fromarray(np.uint8(image)).convert("RGB") >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_1.png" alt="Astronaut Eileen Collins"/> </div> 検索する画像と候補オブジェクトのラベルをパイプラインに渡します。 ここでは画像を直接渡します。他の適切なオプションには、画像へのローカル パスまたは画像 URL が含まれます。また、画像をクエリするすべてのアイテムのテキスト説明も渡します。 ```py >>> predictions = detector( ... image, ... candidate_labels=["human face", "rocket", "nasa badge", "star-spangled banner"], ... 
) >>> predictions [{'score': 0.3571370542049408, 'label': 'human face', 'box': {'xmin': 180, 'ymin': 71, 'xmax': 271, 'ymax': 178}}, {'score': 0.28099656105041504, 'label': 'nasa badge', 'box': {'xmin': 129, 'ymin': 348, 'xmax': 206, 'ymax': 427}}, {'score': 0.2110239565372467, 'label': 'rocket', 'box': {'xmin': 350, 'ymin': -1, 'xmax': 468, 'ymax': 288}}, {'score': 0.13790413737297058, 'label': 'star-spangled banner', 'box': {'xmin': 1, 'ymin': 1, 'xmax': 105, 'ymax': 509}}, {'score': 0.11950037628412247, 'label': 'nasa badge', 'box': {'xmin': 277, 'ymin': 338, 'xmax': 327, 'ymax': 380}}, {'score': 0.10649408400058746, 'label': 'rocket', 'box': {'xmin': 358, 'ymin': 64, 'xmax': 424, 'ymax': 280}}] ``` 予測を視覚化してみましょう。 ```py >>> from PIL import ImageDraw >>> draw = ImageDraw.Draw(image) >>> for prediction in predictions: ... box = prediction["box"] ... label = prediction["label"] ... score = prediction["score"] ... xmin, ymin, xmax, ymax = box.values() ... draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1) ... draw.text((xmin, ymin), f"{label}: {round(score,2)}", fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_2.png" alt="Visualized predictions on NASA image"/> </div> ## Text-prompted zero-shot object detection by hand ゼロショット物体検出パイプラインの使用方法を確認したので、同じことを再現してみましょう。 手動で結果を取得します。 まず、[Hugging Face Hub のチェックポイント](https://huggingface.co/models?other=owlvit) からモデルと関連プロセッサをロードします。 ここでは、前と同じチェックポイントを使用します。 ```py >>> from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection >>> model = AutoModelForZeroShotObjectDetection.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) ``` 気分を変えて、別の画像を撮ってみましょう。 ```py >>> import requests >>> url = "https://unsplash.com/photos/oj0zeY2Ltk4/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MTR8fHBpY25pY3xlbnwwfHx8fDE2Nzc0OTE1NDk&force=true&w=640" >>> im = Image.open(requests.get(url, stream=True).raw) >>> im ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_3.png" alt="Beach photo"/> </div> プロセッサを使用してモデルの入力を準備します。プロセッサーは、 サイズ変更と正規化によるモデルの画像と、テキスト入力を処理する [`CLIPTokenizer`] です。 ```py >>> text_queries = ["hat", "book", "sunglasses", "camera"] >>> inputs = processor(text=text_queries, images=im, return_tensors="pt") ``` 入力をモデルに渡し、後処理し、結果を視覚化します。以前は画像プロセッサによって画像のサイズが変更されていたため、 それらをモデルにフィードするには、[`~OwlViTImageProcessor.post_process_object_detection`] メソッドを使用して、予測された境界を確認する必要があります。 ボックスは元の画像を基準とした正しい座標を持ちます。 ```py >>> import torch >>> with torch.no_grad(): ... outputs = model(**inputs) ... target_sizes = torch.tensor([im.size[::-1]]) ... results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)[0] >>> draw = ImageDraw.Draw(im) >>> scores = results["scores"].tolist() >>> labels = results["labels"].tolist() >>> boxes = results["boxes"].tolist() >>> for box, score, label in zip(boxes, scores, labels): ... xmin, ymin, xmax, ymax = box ... draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1) ... 
draw.text((xmin, ymin), f"{text_queries[label]}: {round(score,2)}", fill="white") >>> im ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_4.png" alt="Beach photo with detected objects"/> </div> ## Batch processing 複数の画像セットとテキスト クエリを渡して、複数の画像内の異なる (または同じ) オブジェクトを検索できます。 宇宙飛行士の画像とビーチの画像を組み合わせてみましょう。 バッチ処理の場合、テキスト クエリをネストされたリストとしてプロセッサに渡し、画像を PIL イメージのリストとして渡す必要があります。 PyTorch テンソル、または NumPy 配列。 ```py >>> images = [image, im] >>> text_queries = [ ... ["human face", "rocket", "nasa badge", "star-spangled banner"], ... ["hat", "book", "sunglasses", "camera"], ... ] >>> inputs = processor(text=text_queries, images=images, return_tensors="pt") ``` 以前は後処理のために単一の画像のサイズをテンソルとして渡していましたが、タプルを渡すこともできます。 複数の画像のタプルのリスト。 2 つの例の予測を作成し、2 番目の例 (`image_idx = 1`) を視覚化しましょう。 ```py >>> with torch.no_grad(): ... outputs = model(**inputs) ... target_sizes = [x.size[::-1] for x in images] ... results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes) >>> image_idx = 1 >>> draw = ImageDraw.Draw(images[image_idx]) >>> scores = results[image_idx]["scores"].tolist() >>> labels = results[image_idx]["labels"].tolist() >>> boxes = results[image_idx]["boxes"].tolist() >>> for box, score, label in zip(boxes, scores, labels): ... xmin, ymin, xmax, ymax = box ... draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1) ... draw.text((xmin, ymin), f"{text_queries[image_idx][label]}: {round(score,2)}", fill="white") >>> images[image_idx] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_4.png" alt="Beach photo with detected objects"/> </div> ## Image-guided object detection テキストクエリによるゼロショットオブジェクト検出に加えて、OWL-ViTは画像ガイドによるオブジェクト検出を提供します。これはつまり 画像クエリを使用して、ターゲット画像内の類似したオブジェクトを検索できます。 テキスト クエリとは異なり、使用できるサンプル画像は 1 つだけです。 対象画像としてソファに2匹の猫がいる画像と、1匹の猫の画像を撮影しましょう クエリとして: ```py >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image_target = Image.open(requests.get(url, stream=True).raw) >>> query_url = "http://images.cocodataset.org/val2017/000000524280.jpg" >>> query_image = Image.open(requests.get(query_url, stream=True).raw) ``` 画像を簡単に見てみましょう。 ```py >>> import matplotlib.pyplot as plt >>> fig, ax = plt.subplots(1, 2) >>> ax[0].imshow(image_target) >>> ax[1].imshow(query_image) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_5.png" alt="Cats"/> </div> 前処理ステップでは、テキスト クエリの代わりに `query_images` を使用する必要があります。 ```py >>> inputs = processor(images=image_target, query_images=query_image, return_tensors="pt") ``` 予測の場合、入力をモデルに渡す代わりに、[`~OwlViTForObjectDetection.image_guided_detection`] に渡します。予測を描く ラベルがないことを除いては以前と同様です。 ```py >>> with torch.no_grad(): ... outputs = model.image_guided_detection(**inputs) ... target_sizes = torch.tensor([image_target.size[::-1]]) ... results = processor.post_process_image_guided_detection(outputs=outputs, target_sizes=target_sizes)[0] >>> draw = ImageDraw.Draw(image_target) >>> scores = results["scores"].tolist() >>> boxes = results["boxes"].tolist() >>> for box, score, label in zip(boxes, scores, labels): ... xmin, ymin, xmax, ymax = box ... 
draw.rectangle((xmin, ymin, xmax, ymax), outline="white", width=4) >>> image_target ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_6.png" alt="Cats with bounding boxes"/> </div> OWL-ViTによる推論をインタラクティブに試したい場合は、このデモをチェックしてください。 <iframe src="https://adirik-owl-vit.hf.space" frameborder="0" width="850" height="450" ></iframe>
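なお、パイプラインの出力は通常の辞書のリストなので、信頼度スコアでのフィルタリングも簡単です。以下は、最初のパイプラインの例で得た `predictions` をそのまま使うと仮定した簡単なスケッチです(しきい値 0.3 は説明用の仮の値です)。

```py
>>> strong_predictions = [p for p in predictions if p["score"] >= 0.3]
>>> [(p["label"], round(p["score"], 2)) for p in strong_predictions]
[('human face', 0.36)]
```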
transformers/docs/source/ja/tasks/zero_shot_object_detection.md/0
{ "file_path": "transformers/docs/source/ja/tasks/zero_shot_object_detection.md", "repo_id": "transformers", "token_count": 5595 }
478
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # AutoClass로 사전 학습된 인스턴스 로드[[load-pretrained-instances-with-an-autoclass]] 트랜스포머 아키텍처가 매우 다양하기 때문에 체크포인트에 맞는 아키텍처를 생성하는 것이 어려울 수 있습니다. 라이브러리를 쉽고 간단하며 유연하게 사용하기 위한 Transformer 핵심 철학의 일환으로, `AutoClass`는 주어진 체크포인트에서 올바른 아키텍처를 자동으로 추론하여 로드합니다. `from_pretrained()` 메서드를 사용하면 모든 아키텍처에 대해 사전 학습된 모델을 빠르게 로드할 수 있으므로 모델을 처음부터 학습하는 데 시간과 리소스를 투입할 필요가 없습니다. 체크포인트에 구애받지 않는 코드를 생성한다는 것은 코드가 한 체크포인트에서 작동하면 아키텍처가 다르더라도 다른 체크포인트(유사한 작업에 대해 학습된 경우)에서도 작동한다는 것을 의미합니다. <Tip> 아키텍처는 모델의 골격을 의미하며 체크포인트는 주어진 아키텍처에 대한 가중치입니다. 예를 들어, [BERT](https://huggingface.co/google-bert/bert-base-uncased)는 아키텍처이고, `google-bert/bert-base-uncased`는 체크포인트입니다. 모델은 아키텍처 또는 체크포인트를 의미할 수 있는 일반적인 용어입니다. </Tip> 이 튜토리얼에서는 다음을 학습합니다: * 사전 학습된 토크나이저 로드하기. * 사전 학습된 이미지 프로세서 로드하기. * 사전 학습된 특징 추출기 로드하기. * 사전 훈련된 프로세서 로드하기. * 사전 학습된 모델 로드하기. ## AutoTokenizer[[autotokenizer]] 거의 모든 NLP 작업은 토크나이저로 시작됩니다. 토크나이저는 사용자의 입력을 모델에서 처리할 수 있는 형식으로 변환합니다. [`AutoTokenizer.from_pretrained`]로 토크나이저를 로드합니다: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") ``` 그리고 아래와 같이 입력을 토큰화합니다: ```py >>> sequence = "In a hole in the ground there lived a hobbit." >>> print(tokenizer(sequence)) {'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` ## AutoImageProcessor[[autoimageprocessor]] 비전 작업의 경우 이미지 프로세서가 이미지를 올바른 입력 형식으로 처리합니다. ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` ## AutoFeatureExtractor[[autofeatureextractor]] 오디오 작업의 경우 특징 추출기가 오디오 신호를 올바른 입력 형식으로 처리합니다. [`AutoFeatureExtractor.from_pretrained`]로 특징 추출기를 로드합니다: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained( ... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" ... ) ``` ## AutoProcessor[[autoprocessor]] 멀티모달 작업에는 두 가지 유형의 전처리 도구를 결합한 프로세서가 필요합니다. 예를 들어 LayoutLMV2 모델에는 이미지를 처리하는 이미지 프로세서와 텍스트를 처리하는 토크나이저가 필요하며, 프로세서는 이 두 가지를 결합합니다. [`AutoProcessor.from_pretrained()`]로 프로세서를 로드합니다: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") ``` ## AutoModel[[automodel]] <frameworkcontent> <pt> 마지막으로 AutoModelFor클래스를 사용하면 주어진 작업에 대해 미리 학습된 모델을 로드할 수 있습니다 (사용 가능한 작업의 전체 목록은 [여기](model_doc/auto)를 참조하세요). 
예를 들어, [`AutoModelForSequenceClassification.from_pretrained`]를 사용하여 시퀀스 분류용 모델을 로드할 수 있습니다: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 동일한 체크포인트를 쉽게 재사용하여 다른 작업에 아키텍처를 로드할 수 있습니다: ```py >>> from transformers import AutoModelForTokenClassification >>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip warning={true}> PyTorch모델의 경우 `from_pretrained()` 메서드는 내부적으로 피클을 사용하여 안전하지 않은 것으로 알려진 `torch.load()`를 사용합니다. 일반적으로 신뢰할 수 없는 소스에서 가져왔거나 변조되었을 수 있는 모델은 로드하지 마세요. 허깅 페이스 허브에서 호스팅되는 공개 모델의 경우 이러한 보안 위험이 부분적으로 완화되며, 각 커밋 시 멀웨어를 [검사합니다](https://huggingface.co/docs/hub/security-malware). GPG를 사용해 서명된 [커밋 검증](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg)과 같은 모범사례는 [문서](https://huggingface.co/docs/hub/security)를 참조하세요. 텐서플로우와 Flax 체크포인트는 영향을 받지 않으며, `from_pretrained`메서드에 `from_tf` 와 `from_flax` 키워드 가변 인자를 사용하여 이 문제를 우회할 수 있습니다. </Tip> 일반적으로 AutoTokenizer 클래스와 AutoModelFor 클래스를 사용하여 미리 학습된 모델 인스턴스를 로드하는 것이 좋습니다. 이렇게 하면 매번 올바른 아키텍처를 로드할 수 있습니다. 다음 [튜토리얼](preprocessing)에서는 새롭게 로드한 토크나이저, 이미지 프로세서, 특징 추출기를 사용하여 미세 튜닝용 데이터 세트를 전처리하는 방법에 대해 알아봅니다. </pt> <tf> 마지막으로 `TFAutoModelFor` 클래스를 사용하면 주어진 작업에 대해 사전 훈련된 모델을 로드할 수 있습니다. (사용 가능한 작업의 전체 목록은 [여기](model_doc/auto)를 참조하세요. 예를 들어, [`TFAutoModelForSequenceClassification.from_pretrained`]로 시퀀스 분류를 위한 모델을 로드합니다: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 쉽게 동일한 체크포인트를 재사용하여 다른 작업에 아키텍처를 로드할 수 있습니다: ```py >>> from transformers import TFAutoModelForTokenClassification >>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 일반적으로, `AutoTokenizer`클래스와 `TFAutoModelFor` 클래스를 사용하여 미리 학습된 모델 인스턴스를 로드하는 것이 좋습니다. 이렇게 하면 매번 올바른 아키텍처를 로드할 수 있습니다. 다음 [튜토리얼](preprocessing)에서는 새롭게 로드한 토크나이저, 이미지 프로세서, 특징 추출기를 사용하여 미세 튜닝용 데이터 세트를 전처리하는 방법에 대해 알아봅니다. </tf> </frameworkcontent>
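아래는 `AutoTokenizer`와 `AutoModelFor` 클래스를 함께 사용하는 최소 예시입니다(PyTorch 기준). 여기서 사용한 감성 분류 체크포인트는 설명을 위해 가정한 것으로, 비슷한 시퀀스 분류 체크포인트라면 무엇이든 같은 방식으로 동작합니다.

```py
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> checkpoint = "distilbert/distilbert-base-uncased-finetuned-sst-2-english"
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

>>> inputs = tokenizer("In a hole in the ground there lived a hobbit.", return_tensors="pt")
>>> with torch.no_grad():
...     logits = model(**inputs).logits

>>> model.config.id2label[logits.argmax(-1).item()]  # 이 체크포인트에서는 'POSITIVE' 또는 'NEGATIVE'가 반환됩니다
```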
transformers/docs/source/ko/autoclass_tutorial.md/0
{ "file_path": "transformers/docs/source/ko/autoclass_tutorial.md", "repo_id": "transformers", "token_count": 5250 }
479
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLaMA [[llama]] ## 개요 [[overview]] LLaMA 모델은 Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample에 의해 제안된 [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971)에서 소개되었습니다. 이 모델은 7B에서 65B개의 파라미터까지 다양한 크기의 기초 언어 모델을 모아놓은 것입니다. 논문의 초록은 다음과 같습니다: *"LLaMA는 7B에서 65B개의 파라미터 수를 가진 기초 언어 모델의 모음입니다. 우리는 수조 개의 토큰으로 모델을 훈련시켰고, 공개적으로 이용 가능한 데이터셋만을 사용하여 최고 수준의 모델을 훈련시킬 수 있음을 보여줍니다. 특히, LLaMA-13B 모델은 대부분의 벤치마크에서 GPT-3 (175B)를 능가하며, LLaMA-65B는 최고 수준의 모델인 Chinchilla-70B와 PaLM-540B에 버금가는 성능을 보입니다. 우리는 모든 모델을 연구 커뮤니티에 공개합니다."* 팁: - LLaMA 모델의 가중치는 [이 양식](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form)을 작성하여 얻을 수 있습니다. - 가중치를 다운로드한 후에는 이를 [변환 스크립트](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py)를 사용하여 Hugging Face Transformers 형식으로 변환해야합니다. 변환 스크립트를 실행하려면 아래의 예시 명령어를 참고하세요: ```bash python src/transformers/models/llama/convert_llama_weights_to_hf.py \ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path ``` - 변환을 하였다면 모델과 토크나이저는 다음과 같이 로드할 수 있습니다: ```python from transformers import LlamaForCausalLM, LlamaTokenizer tokenizer = LlamaTokenizer.from_pretrained("/output/path") model = LlamaForCausalLM.from_pretrained("/output/path") ``` 스크립트를 실행하기 위해서는 모델을 float16 정밀도로 전부 로드할 수 있을 만큼의 충분한 CPU RAM이 필요합니다. (가장 큰 버전의 모델이 여러 체크포인트로 나뉘어 있더라도, 각 체크포인트는 모델의 각 가중치의 일부를 포함하고 있기 때문에 모든 체크포인트를 RAM에 로드해야 합니다) 65B 모델의 경우, 총 130GB의 RAM이 필요합니다. - LLaMA 토크나이저는 [sentencepiece](https://github.com/google/sentencepiece)를 기반으로 하는 BPE 모델입니다. sentencepiece의 특징 중 하나는 시퀀스를 디코딩할 때 첫 토큰이 단어의 시작이라면 (예를 들어 "Banana"), 토크나이저는 문자열 앞에 공백을 추가하지 않는다는 것입니다. 이 모델은 [BlackSamorez](https://huggingface.co/BlackSamorez)의 기여와 함께, [zphang](https://huggingface.co/zphang)에 의해 제공되었습니다. Hugging Face에서의 구현 코드는 GPT-NeoX를 기반으로 하며 [여기](https://github.com/EleutherAI/gpt-neox)에서 찾을 수 있고, 저자의 코드 원본은 [여기](https://github.com/facebookresearch/llama)에서 확인할 수 있습니다. 원래 LLaMA 모델을 기반으로 Meta AI에서 몇 가지 후속 작업을 발표했습니다: - **Llama2**: Llama2는 구조적인 몇 가지 수정(Grouped Query Attention)을 통해 개선된 버전이며, 2조 개의 토큰으로 사전 훈련이 되어 있습니다. Llama2에 대한 자세한 내용은 [이 문서](llama2)를 참고하세요. ## 리소스 [[resources]] LLaMA를 시작하는 데 도움이 될 Hugging Face 및 커뮤니티(🌎로 표시)의 공식 자료 목록입니다. 여기에 자료를 제출하고 싶다면 Pull Request를 올려주세요! 추가할 자료는 기존의 자료와 중복되지 않고 새로운 내용을 보여주는 것이 좋습니다. 
<PipelineTag pipeline="text-classification"/> - LLaMA 모델을 텍스트 분류 작업에 적용하기 위한 프롬프트 튜닝 방법에 대한 [노트북](https://colab.research.google.com/github/bigscience-workshop/petals/blob/main/examples/prompt-tuning-sst2.ipynb#scrollTo=f04ba4d2) 🌎 <PipelineTag pipeline="question-answering"/> - [Stack Exchange](https://stackexchange.com/)에서 질문에 답하는 LLaMA를 훈련하는 방법을 위한 [StackLLaMA: RLHF로 LLaMA를 훈련하는 실전 가이드](https://huggingface.co/blog/stackllama#stackllama-a-hands-on-guide-to-train-llama-with-rlhf) 🌎 ⚗️ 최적화 - 제한된 메모리를 가진 GPU에서 xturing 라이브러리를 사용하여 LLaMA 모델을 미세 조정하는 방법에 대한 [노트북](https://colab.research.google.com/drive/1SQUXq1AMZPSLD4mk3A3swUIc6Y2dclme?usp=sharing) 🌎 ⚡️ 추론 - 🤗 PEFT 라이브러리의 PeftModel을 사용하여 LLaMA 모델을 실행하는 방법에 대한 [노트북](https://colab.research.google.com/github/DominguesM/alpaca-lora-ptbr-7b/blob/main/notebooks/02%20-%20Evaluate.ipynb) 🌎 - LangChain을 사용하여 PEFT 어댑터 LLaMA 모델을 로드하는 방법에 대한 [노트북](https://colab.research.google.com/drive/1l2GiSSPbajVyp2Nk3CFT4t3uH6-5TiBe?usp=sharing) 🌎 🚀 배포 - 🤗 PEFT 라이브러리와 사용자 친화적인 UI로 LLaMA 모델을 미세 조정하는 방법에 대한 [노트북](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb#scrollTo=3PM_DilAZD8T) 🌎 - Amazon SageMaker에서 텍스트 생성을 위해 Open-LLaMA 모델을 배포하는 방법에 대한 [노트북](https://github.com/aws/amazon-sagemaker-examples/blob/main/introduction_to_amazon_algorithms/jumpstart-foundation-models/text-generation-open-llama.ipynb) 🌎 ## LlamaConfig [[llamaconfig]] [[autodoc]] LlamaConfig ## LlamaTokenizer [[llamatokenizer]] [[autodoc]] LlamaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LlamaTokenizerFast [[llamatokenizerfast]] [[autodoc]] LlamaTokenizerFast - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - update_post_processor - save_vocabulary ## LlamaModel [[llamamodel]] [[autodoc]] LlamaModel - forward ## LlamaForCausalLM [[llamaforcausallm]] [[autodoc]] LlamaForCausalLM - forward ## LlamaForSequenceClassification [[llamaforsequenceclassification]] [[autodoc]] LlamaForSequenceClassification - forward
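위의 변환 스크립트로 만든 체크포인트를 간단히 추론에 사용해 보려면 다음과 같이 할 수 있습니다. `/output/path`는 위에서 가정한 출력 경로이고, 프롬프트와 생성 길이는 설명을 위한 임의의 값이며, 모델 전체를 메모리에 올릴 수 있다고 가정한 스케치입니다.

```python
from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("/output/path")
model = LlamaForCausalLM.from_pretrained("/output/path")

# 임의의 프롬프트로 이어지는 텍스트를 생성합니다.
inputs = tokenizer("Plants create energy through a process known as", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```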
transformers/docs/source/ko/model_doc/llama.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/llama.md", "repo_id": "transformers", "token_count": 4406 }
480
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 성능 및 확장성 [[performance-and-scalability]] 점점 더 큰 규모의 트랜스포머 모델을 훈련하고 프로덕션에 배포하는 데에는 다양한 어려움이 따릅니다. 훈련 중에는 모델이 사용 가능한 GPU 메모리보다 더 많은 메모리를 필요로 하거나 훈련 속도가 매우 느릴 수 있으며, 추론을 위해 배포할 때는 제품 환경에서 요구되는 처리량으로 인해 과부하가 발생할 수 있습니다. 이 문서는 이러한 문제를 극복하고 사용 사례에 가장 적합한 설정을 찾도록 도움을 주기 위해 설계되었습니다. 훈련과 추론으로 가이드를 분할했는데, 이는 각각 다른 문제와 해결 방법이 있기 때문입니다. 그리고 각 가이드에는 다양한 종류의 하드웨어 설정에 대한 별도의 가이드가 있습니다(예: 훈련을 위한 단일 GPU vs 다중 GPU 또는 추론을 위한 CPU vs GPU). ![perf_overview](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/perf_overview.png) 이 문서는 사용자의 상황에 유용할 수 있는 방법들에 대한 개요 및 시작점 역할을 합니다. ## 훈련 [[training]] 효율적인 트랜스포머 모델 훈련에는 GPU나 TPU와 같은 가속기가 필요합니다. 가장 일반적인 경우는 단일 GPU만 사용하는 경우지만, 다중 GPU 및 CPU 훈련에 대한 섹션도 있습니다(곧 더 많은 내용이 추가될 예정). <Tip> 참고: 단일 GPU 섹션에서 소개된 대부분의 전략(예: 혼합 정밀도 훈련 또는 그라디언트 누적)은 일반적인 모델 훈련에도 적용되므로, 다중 GPU나 CPU 훈련과 같은 섹션을 살펴보기 전에 꼭 참고하시길 바랍니다. </Tip> ### 단일 GPU [[single-gpu]] 단일 GPU에서 대규모 모델을 훈련하는 것은 어려울 수 있지만, 이를 가능하게 하는 여러 가지 도구와 방법이 있습니다. 이 섹션에서는 혼합 정밀도 훈련, 그라디언트 누적 및 체크포인팅, 효율적인 옵티마이저, 최적의 배치 크기를 결정하기 위한 전략 등에 대해 논의합니다. [단일 GPU 훈련 섹션으로 이동](perf_train_gpu_one) ### 다중 GPU [[multigpu]] 단일 GPU에서 훈련하는 것이 너무 느리거나 대규모 모델에 적합하지 않은 경우도 있습니다. 다중 GPU 설정으로 전환하는 것은 논리적인 단계이지만, 여러 GPU에서 한 번에 훈련하려면 각 GPU마다 모델의 전체 사본을 둘지, 혹은 모델 자체도 여러 GPU에 분산하여 둘지 등 새로운 결정을 내려야 합니다. 이 섹션에서는 데이터, 텐서 및 파이프라인 병렬화에 대해 살펴봅니다. [다중 GPU 훈련 섹션으로 이동](perf_train_gpu_many) ### CPU [[cpu]] [CPU 훈련 섹션으로 이동](perf_train_cpu) ### TPU [[tpu]] [_곧 제공될 예정_](perf_train_tpu) ### 특수한 하드웨어 [[specialized-hardware]] [_곧 제공될 예정_](perf_train_special) ## 추론 [[inference]] 제품 및 서비스 환경에서 대규모 모델을 효율적으로 추론하는 것은 모델을 훈련하는 것만큼 어려울 수 있습니다. 이어지는 섹션에서는 CPU 및 단일/다중 GPU 설정에서 추론을 진행하는 단계를 살펴봅니다. ### CPU [[cpu]] [CPU 추론 섹션으로 이동](perf_infer_cpu) ### 단일 GPU [[single-gpu]] [단일 GPU 추론 섹션으로 이동](perf_infer_gpu_one) ### 다중 GPU [[multigpu]] [다중 GPU 추론 섹션으로 이동](perf_infer_gpu_many) ### 특수한 하드웨어 [[specialized-hardware]] [_곧 제공될 예정_](perf_infer_special) ## 하드웨어 [[hardware]] 하드웨어 섹션에서는 자신만의 딥러닝 장비를 구축할 때 유용한 팁과 요령을 살펴볼 수 있습니다. [하드웨어 섹션으로 이동](perf_hardware) ## 기여하기 [[contribute]] 이 문서는 완성되지 않은 상태이며, 추가해야 할 내용이나 수정 사항이 많이 있습니다. 따라서 추가하거나 수정할 내용이 있으면 주저하지 말고 PR을 열어 주시거나, 자세한 내용을 논의하기 위해 Issue를 시작해 주시기 바랍니다. A가 B보다 좋다고 하는 기여를 할 때는, 재현 가능한 벤치마크와/또는 해당 정보의 출처 링크를 포함해주세요(당신으로부터의 직접적인 정보가 아닌 경우).
transformers/docs/source/ko/performance.md/0
{ "file_path": "transformers/docs/source/ko/performance.md", "repo_id": "transformers", "token_count": 3692 }
481
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 이미지 분류[[image-classification]] [[open-in-colab]] <Youtube id="tjAIM7BOYhw"/> 이미지 분류는 이미지에 레이블 또는 클래스를 할당합니다. 텍스트 또는 오디오 분류와 달리 입력은 이미지를 구성하는 픽셀 값입니다. 이미지 분류에는 자연재해 후 피해 감지, 농작물 건강 모니터링, 의료 이미지에서 질병의 징후 검사 지원 등 다양한 응용 사례가 있습니다. 이 가이드에서는 다음을 설명합니다: 1. [Food-101](https://huggingface.co/datasets/food101) 데이터 세트에서 [ViT](model_doc/vit)를 미세 조정하여 이미지에서 식품 항목을 분류합니다. 2. 추론을 위해 미세 조정 모델을 사용합니다. <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/image-classification)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에, 필요한 모든 라이브러리가 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate ``` Hugging Face 계정에 로그인하여 모델을 업로드하고 커뮤니티에 공유하는 것을 권장합니다. 메시지가 표시되면, 토큰을 입력하여 로그인하세요: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Food-101 데이터 세트 가져오기[[load-food101-dataset]] 🤗 Datasets 라이브러리에서 Food-101 데이터 세트의 더 작은 부분 집합을 가져오는 것으로 시작합니다. 이렇게 하면 전체 데이터 세트에 대한 훈련에 많은 시간을 할애하기 전에 실험을 통해 모든 것이 제대로 작동하는지 확인할 수 있습니다. ```py >>> from datasets import load_dataset >>> food = load_dataset("food101", split="train[:5000]") ``` 데이터 세트의 `train`을 [`~datasets.Dataset.train_test_split`] 메소드를 사용하여 훈련 및 테스트 세트로 분할하세요: ```py >>> food = food.train_test_split(test_size=0.2) ``` 그리고 예시를 살펴보세요: ```py >>> food["train"][0] {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F52AFC8AC50>, 'label': 79} ``` 데이터 세트의 각 예제에는 두 개의 필드가 있습니다: - `image`: 식품 항목의 PIL 이미지 - `label`: 식품 항목의 레이블 클래스 모델이 레이블 ID에서 레이블 이름을 쉽게 가져올 수 있도록 레이블 이름을 정수로 매핑하고, 정수를 레이블 이름으로 매핑하는 사전을 만드세요: ```py >>> labels = food["train"].features["label"].names >>> label2id, id2label = dict(), dict() >>> for i, label in enumerate(labels): ... label2id[label] = str(i) ... id2label[str(i)] = label ``` 이제 레이블 ID를 레이블 이름으로 변환할 수 있습니다: ```py >>> id2label[str(79)] 'prime_rib' ``` ## 전처리[[preprocess]] 다음 단계는 이미지를 텐서로 처리하기 위해 ViT 이미지 프로세서를 가져오는 것입니다: ```py >>> from transformers import AutoImageProcessor >>> checkpoint = "google/vit-base-patch16-224-in21k" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) ``` <frameworkcontent> <pt> 이미지에 몇 가지 이미지 변환을 적용하여 과적합에 대해 모델을 더 견고하게 만듭니다. 여기서 Torchvision의 [`transforms`](https://pytorch.org/vision/stable/transforms.html) 모듈을 사용하지만, 원하는 이미지 라이브러리를 사용할 수도 있습니다. 이미지의 임의 부분을 크롭하고 크기를 조정한 다음, 이미지 평균과 표준 편차로 정규화하세요: ```py >>> from torchvision.transforms import RandomResizedCrop, Compose, Normalize, ToTensor >>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... 
) >>> _transforms = Compose([RandomResizedCrop(size), ToTensor(), normalize]) ``` 그런 다음 전처리 함수를 만들어 변환을 적용하고 이미지의 `pixel_values`(모델에 대한 입력)를 반환하세요: ```py >>> def transforms(examples): ... examples["pixel_values"] = [_transforms(img.convert("RGB")) for img in examples["image"]] ... del examples["image"] ... return examples ``` 전체 데이터 세트에 전처리 기능을 적용하려면 🤗 Datasets [`~datasets.Dataset.with_transform`]을 사용합니다. 데이터 세트의 요소를 가져올 때 변환이 즉시 적용됩니다: ```py >>> food = food.with_transform(transforms) ``` 이제 [`DefaultDataCollator`]를 사용하여 예제 배치를 만듭니다. 🤗 Transformers의 다른 데이터 콜레이터와 달리, `DefaultDataCollator`는 패딩과 같은 추가적인 전처리를 적용하지 않습니다. ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator() ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 과적합을 방지하고 모델을 보다 견고하게 만들기 위해 데이터 세트의 훈련 부분에 데이터 증강을 추가합니다. 여기서 Keras 전처리 레이어로 훈련 데이터에 대한 변환(데이터 증강 포함)과 검증 데이터에 대한 변환(중앙 크로핑, 크기 조정, 정규화만)을 정의합니다. `tf.image` 또는 다른 원하는 라이브러리를 사용할 수 있습니다. ```py >>> from tensorflow import keras >>> from tensorflow.keras import layers >>> size = (image_processor.size["height"], image_processor.size["width"]) >>> train_data_augmentation = keras.Sequential( ... [ ... layers.RandomCrop(size[0], size[1]), ... layers.Rescaling(scale=1.0 / 127.5, offset=-1), ... layers.RandomFlip("horizontal"), ... layers.RandomRotation(factor=0.02), ... layers.RandomZoom(height_factor=0.2, width_factor=0.2), ... ], ... name="train_data_augmentation", ... ) >>> val_data_augmentation = keras.Sequential( ... [ ... layers.CenterCrop(size[0], size[1]), ... layers.Rescaling(scale=1.0 / 127.5, offset=-1), ... ], ... name="val_data_augmentation", ... ) ``` 다음으로 한 번에 하나의 이미지가 아니라 이미지 배치에 적절한 변환을 적용하는 함수를 만듭니다. ```py >>> import numpy as np >>> import tensorflow as tf >>> from PIL import Image >>> def convert_to_tf_tensor(image: Image): ... np_image = np.array(image) ... tf_image = tf.convert_to_tensor(np_image) ... # `expand_dims()` is used to add a batch dimension since ... # the TF augmentation layers operates on batched inputs. ... return tf.expand_dims(tf_image, 0) >>> def preprocess_train(example_batch): ... """Apply train_transforms across a batch.""" ... images = [ ... train_data_augmentation(convert_to_tf_tensor(image.convert("RGB"))) for image in example_batch["image"] ... ] ... example_batch["pixel_values"] = [tf.transpose(tf.squeeze(image)) for image in images] ... return example_batch ... def preprocess_val(example_batch): ... """Apply val_transforms across a batch.""" ... images = [ ... val_data_augmentation(convert_to_tf_tensor(image.convert("RGB"))) for image in example_batch["image"] ... ] ... example_batch["pixel_values"] = [tf.transpose(tf.squeeze(image)) for image in images] ... return example_batch ``` 🤗 Datasets [`~datasets.Dataset.set_transform`]를 사용하여 즉시 변환을 적용하세요: ```py food["train"].set_transform(preprocess_train) food["test"].set_transform(preprocess_val) ``` 최종 전처리 단계로 `DefaultDataCollator`를 사용하여 예제 배치를 만듭니다. 🤗 Transformers의 다른 데이터 콜레이터와 달리 `DefaultDataCollator`는 패딩과 같은 추가 전처리를 적용하지 않습니다. ```py >>> from transformers import DefaultDataCollator >>> data_collator = DefaultDataCollator(return_tensors="tf") ``` </tf> </frameworkcontent> ## 평가[[evaluate]] 훈련 중에 평가 지표를 포함하면 모델의 성능을 평가하는 데 도움이 되는 경우가 많습니다. 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) 라이브러리로 평가 방법을 빠르게 가져올 수 있습니다. 이 작업에서는 [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) 평가 지표를 가져옵니다. 
(🤗 Evaluate [빠른 둘러보기](https://huggingface.co/docs/evaluate/a_quick_tour)를 참조하여 평가 지표를 가져오고 계산하는 방법에 대해 자세히 알아보세요): ```py >>> import evaluate >>> accuracy = evaluate.load("accuracy") ``` 그런 다음 예측과 레이블을 [`~evaluate.EvaluationModule.compute`]에 전달하여 정확도를 계산하는 함수를 만듭니다: ```py >>> import numpy as np >>> def compute_metrics(eval_pred): ... predictions, labels = eval_pred ... predictions = np.argmax(predictions, axis=1) ... return accuracy.compute(predictions=predictions, references=labels) ``` 이제 `compute_metrics` 함수를 사용할 준비가 되었으며, 훈련을 설정하면 이 함수로 되돌아올 것입니다. ## 훈련[[train]] <frameworkcontent> <pt> <Tip> [`Trainer`]를 사용하여 모델을 미세 조정하는 방법에 익숙하지 않은 경우, [여기](../training#train-with-pytorch-trainer)에서 기본 튜토리얼을 확인하세요! </Tip> 이제 모델을 훈련시킬 준비가 되었습니다! [`AutoModelForImageClassification`]로 ViT를 가져옵니다. 예상되는 레이블 수, 레이블 매핑 및 레이블 수를 지정하세요: ```py >>> from transformers import AutoModelForImageClassification, TrainingArguments, Trainer >>> model = AutoModelForImageClassification.from_pretrained( ... checkpoint, ... num_labels=len(labels), ... id2label=id2label, ... label2id=label2id, ... ) ``` 이제 세 단계만 거치면 끝입니다: 1. [`TrainingArguments`]에서 훈련 하이퍼파라미터를 정의하세요. `image` 열이 삭제되기 때문에 미사용 열을 제거하지 않는 것이 중요합니다. `image` 열이 없으면 `pixel_values`을 생성할 수 없습니다. 이 동작을 방지하려면 `remove_unused_columns=False`로 설정하세요! 다른 유일한 필수 매개변수는 모델 저장 위치를 지정하는 `output_dir`입니다. `push_to_hub=True`로 설정하면 이 모델을 허브에 푸시합니다(모델을 업로드하려면 Hugging Face에 로그인해야 합니다). 각 에폭이 끝날 때마다, [`Trainer`]가 정확도를 평가하고 훈련 체크포인트를 저장합니다. 2. [`Trainer`]에 모델, 데이터 세트, 토크나이저, 데이터 콜레이터 및 `compute_metrics` 함수와 함께 훈련 인수를 전달하세요. 3. [`~Trainer.train`]을 호출하여 모델을 미세 조정하세요. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_food_model", ... remove_unused_columns=False, ... eval_strategy="epoch", ... save_strategy="epoch", ... learning_rate=5e-5, ... per_device_train_batch_size=16, ... gradient_accumulation_steps=4, ... per_device_eval_batch_size=16, ... num_train_epochs=3, ... warmup_ratio=0.1, ... logging_steps=10, ... load_best_model_at_end=True, ... metric_for_best_model="accuracy", ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=data_collator, ... train_dataset=food["train"], ... eval_dataset=food["test"], ... tokenizer=image_processor, ... compute_metrics=compute_metrics, ... ) >>> trainer.train() ``` 훈련이 완료되면, 모든 사람이 모델을 사용할 수 있도록 [`~transformers.Trainer.push_to_hub`] 메소드로 모델을 허브에 공유하세요: ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <frameworkcontent> <tf> <Tip> Keras를 사용하여 모델을 미세 조정하는 방법에 익숙하지 않은 경우, 먼저 [기본 튜토리얼](./training#train-a-tensorflow-model-with-keras)을 확인하세요! </Tip> TensorFlow에서 모델을 미세 조정하려면 다음 단계를 따르세요: 1. 훈련 하이퍼파라미터를 정의하고 옵티마이저와 학습률 스케쥴을 설정합니다. 2. 사전 훈련된 모델을 인스턴스화합니다. 3. 🤗 Dataset을 `tf.data.Dataset`으로 변환합니다. 4. 모델을 컴파일합니다. 5. 콜백을 추가하고 훈련을 수행하기 위해 `fit()` 메소드를 사용합니다. 6. 커뮤니티와 공유하기 위해 모델을 🤗 Hub에 업로드합니다. 하이퍼파라미터, 옵티마이저 및 학습률 스케쥴을 정의하는 것으로 시작합니다: ```py >>> from transformers import create_optimizer >>> batch_size = 16 >>> num_epochs = 5 >>> num_train_steps = len(food["train"]) * num_epochs >>> learning_rate = 3e-5 >>> weight_decay_rate = 0.01 >>> optimizer, lr_schedule = create_optimizer( ... init_lr=learning_rate, ... num_train_steps=num_train_steps, ... weight_decay_rate=weight_decay_rate, ... num_warmup_steps=0, ... ) ``` 그런 다음 레이블 매핑과 함께 [`TFAuto ModelForImageClassification`]으로 ViT를 가져옵니다: ```py >>> from transformers import TFAutoModelForImageClassification >>> model = TFAutoModelForImageClassification.from_pretrained( ... checkpoint, ... id2label=id2label, ... label2id=label2id, ... 
) ``` 데이터 세트를 [`~datasets.Dataset.to_tf_dataset`]와 `data_collator`를 사용하여 `tf.data.Dataset` 형식으로 변환하세요: ```py >>> # converting our train dataset to tf.data.Dataset >>> tf_train_dataset = food["train"].to_tf_dataset( ... columns="pixel_values", label_cols="label", shuffle=True, batch_size=batch_size, collate_fn=data_collator ... ) >>> # converting our test dataset to tf.data.Dataset >>> tf_eval_dataset = food["test"].to_tf_dataset( ... columns="pixel_values", label_cols="label", shuffle=True, batch_size=batch_size, collate_fn=data_collator ... ) ``` `compile()`를 사용하여 훈련 모델을 구성하세요: ```py >>> from tensorflow.keras.losses import SparseCategoricalCrossentropy >>> loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) >>> model.compile(optimizer=optimizer, loss=loss) ``` 예측에서 정확도를 계산하고 모델을 🤗 Hub로 푸시하려면 [Keras callbacks](../main_classes/keras_callbacks)를 사용하세요. `compute_metrics` 함수를 [KerasMetricCallback](../main_classes/keras_callbacks#transformers.KerasMetricCallback)에 전달하고, [PushToHubCallback](../main_classes/keras_callbacks#transformers.PushToHubCallback)을 사용하여 모델을 업로드합니다: ```py >>> from transformers.keras_callbacks import KerasMetricCallback, PushToHubCallback >>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_eval_dataset) >>> push_to_hub_callback = PushToHubCallback( ... output_dir="food_classifier", ... tokenizer=image_processor, ... save_strategy="no", ... ) >>> callbacks = [metric_callback, push_to_hub_callback] ``` 이제 모델을 훈련할 준비가 되었습니다! 훈련 및 검증 데이터 세트, 에폭 수와 함께 `fit()`을 호출하고, 콜백을 사용하여 모델을 미세 조정합니다: ```py >>> model.fit(tf_train_dataset, validation_data=tf_eval_dataset, epochs=num_epochs, callbacks=callbacks) Epoch 1/5 250/250 [==============================] - 313s 1s/step - loss: 2.5623 - val_loss: 1.4161 - accuracy: 0.9290 Epoch 2/5 250/250 [==============================] - 265s 1s/step - loss: 0.9181 - val_loss: 0.6808 - accuracy: 0.9690 Epoch 3/5 250/250 [==============================] - 252s 1s/step - loss: 0.3910 - val_loss: 0.4303 - accuracy: 0.9820 Epoch 4/5 250/250 [==============================] - 251s 1s/step - loss: 0.2028 - val_loss: 0.3191 - accuracy: 0.9900 Epoch 5/5 250/250 [==============================] - 238s 949ms/step - loss: 0.1232 - val_loss: 0.3259 - accuracy: 0.9890 ``` 축하합니다! 모델을 미세 조정하고 🤗 Hub에 공유했습니다. 이제 추론에 사용할 수 있습니다! </tf> </frameworkcontent> <Tip> 이미지 분류를 위한 모델을 미세 조정하는 자세한 예제는 다음 [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)을 참조하세요. </Tip> ## 추론[[inference]] 좋아요, 이제 모델을 미세 조정했으니 추론에 사용할 수 있습니다! 추론을 수행하고자 하는 이미지를 가져와봅시다: ```py >>> ds = load_dataset("food101", split="validation[:10]") >>> image = ds["image"][0] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png" alt="image of beignets"/> </div> 미세 조정 모델로 추론을 시도하는 가장 간단한 방법은 [`pipeline`]을 사용하는 것입니다. 
모델로 이미지 분류를 위한 `pipeline`을 인스턴스화하고 이미지를 전달합니다: ```py >>> from transformers import pipeline >>> classifier = pipeline("image-classification", model="my_awesome_food_model") >>> classifier(image) [{'score': 0.31856709718704224, 'label': 'beignets'}, {'score': 0.015232225880026817, 'label': 'bruschetta'}, {'score': 0.01519392803311348, 'label': 'chicken_wings'}, {'score': 0.013022331520915031, 'label': 'pork_chop'}, {'score': 0.012728818692266941, 'label': 'prime_rib'}] ``` 원한다면, `pipeline`의 결과를 수동으로 복제할 수도 있습니다: <frameworkcontent> <pt> 이미지를 전처리하기 위해 이미지 프로세서를 가져오고 `input`을 PyTorch 텐서로 반환합니다: ```py >>> from transformers import AutoImageProcessor >>> import torch >>> image_processor = AutoImageProcessor.from_pretrained("my_awesome_food_model") >>> inputs = image_processor(image, return_tensors="pt") ``` 입력을 모델에 전달하고 logits을 반환합니다: ```py >>> from transformers import AutoModelForImageClassification >>> model = AutoModelForImageClassification.from_pretrained("my_awesome_food_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` 확률이 가장 높은 예측 레이블을 가져오고, 모델의 `id2label` 매핑을 사용하여 레이블로 변환합니다: ```py >>> predicted_label = logits.argmax(-1).item() >>> model.config.id2label[predicted_label] 'beignets' ``` </pt> </frameworkcontent> <frameworkcontent> <tf> 이미지를 전처리하기 위해 이미지 프로세서를 가져오고 `input`을 TensorFlow 텐서로 반환합니다: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("MariaK/food_classifier") >>> inputs = image_processor(image, return_tensors="tf") ``` 입력을 모델에 전달하고 logits을 반환합니다: ```py >>> from transformers import TFAutoModelForImageClassification >>> model = TFAutoModelForImageClassification.from_pretrained("MariaK/food_classifier") >>> logits = model(**inputs).logits ``` 확률이 가장 높은 예측 레이블을 가져오고, 모델의 `id2label` 매핑을 사용하여 레이블로 변환합니다: ```py >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) >>> model.config.id2label[predicted_class_id] 'beignets' ``` </tf> </frameworkcontent>
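파이프라인이 반환하는 것과 같은 확률 점수가 필요하다면, 위 PyTorch 추론 예시의 `logits`에 소프트맥스를 적용하면 됩니다. 아래는 그 예시의 `logits`와 `model`을 그대로 이어서 사용한다고 가정한 간단한 스케치입니다.

```py
>>> import torch

>>> probs = torch.softmax(logits, dim=-1)[0]
>>> top5 = torch.topk(probs, k=5)
>>> [(model.config.id2label[i.item()], round(v.item(), 4)) for v, i in zip(top5.values, top5.indices)]
```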
transformers/docs/source/ko/tasks/image_classification.md/0
{ "file_path": "transformers/docs/source/ko/tasks/image_classification.md", "repo_id": "transformers", "token_count": 11497 }
482
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 Transformers로 작업을 해결하는 방법[[how-transformers-solve-tasks]] [🤗 Transformers로 할 수 있는 작업](task_summary)에서 자연어 처리(NLP), 음성 및 오디오, 컴퓨터 비전 작업 등의 중요한 응용을 배웠습니다. 이 페이지에서는 모델이 이러한 작업을 어떻게 해결하는지 자세히 살펴보고 내부에서 어떤 일이 일어나는지 설명합니다. 주어진 작업을 해결하는 많은 방법이 있으며, 일부 모델은 특정 기술을 구현하거나 심지어 새로운 방식으로 작업에 접근할 수도 있지만, Transformer 모델의 경우 일반적인 아이디어는 동일합니다. 유연한 아키텍처 덕분에 대부분의 모델은 인코더, 디코더 또는 인코더-디코더 구조의 변형입니다. Transformer 모델뿐만 아니라 우리의 라이브러리에는 오늘날 컴퓨터 비전 작업에 사용되는 몇 가지 합성곱 신경망(CNNs)도 있습니다. 또한, 우리는 현대 CNN의 작동 방식에 대해 설명할 것입니다. 작업이 어떻게 해결되는지 설명하기 위해, 유용한 예측을 출력하고자 모델 내부에서 어떤 일이 일어나는지 살펴봅니다. - 오디오 분류 및 자동 음성 인식(ASR)을 위한 [Wav2Vec2](model_doc/wav2vec2) - 이미지 분류를 위한 [Vision Transformer (ViT)](model_doc/vit) 및 [ConvNeXT](model_doc/convnext) - 객체 탐지를 위한 [DETR](model_doc/detr) - 이미지 분할을 위한 [Mask2Former](model_doc/mask2former) - 깊이 추정을 위한 [GLPN](model_doc/glpn) - 인코더를 사용하는 텍스트 분류, 토큰 분류 및 질의응답과 같은 NLP 작업을 위한 [BERT](model_doc/bert) - 디코더를 사용하는 텍스트 생성과 같은 NLP 작업을 위한 [GPT2](model_doc/gpt2) - 인코더-디코더를 사용하는 요약 및 번역과 같은 NLP 작업을 위한 [BART](model_doc/bart) <Tip> 더 나아가기 전에, 기존 Transformer 아키텍처에 대한 기본적인 지식을 숙지하는 것이 좋습니다. 인코더, 디코더 및 어텐션의 작동 방식을 알면 다양한 Transformer 모델이 어떻게 작동하는지 이해하는 데 도움이 됩니다. 시작 단계거나 복습이 필요한 경우, 더 많은 정보를 위해 [코스](https://huggingface.co/course/chapter1/4?fw=pt)를 확인하세요! </Tip> ## 음성 및 오디오[[speech-and-audio]] [Wav2Vec2](model_doc/wav2vec2)는 레이블이 지정되지 않은 음성 데이터에 대해 사전훈련된 모델로, 오디오 분류 및 자동 음성 인식을 위해 레이블이 지정된 데이터로 미세 조정합니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/wav2vec2_architecture.png"/> </div> 이 모델에는 4가지 주요 구성 요소가 있습니다: 1. *특징 인코더(feature encoder)*는 원시 오디오 파형(raw audio waveform)을 가져와서 제로 평균 및 단위 분산으로 표준화하고, 각각 20ms 길이의 특징 벡터의 시퀀스로 변환합니다. 2. 오디오 파형은 본질적으로 연속적이기 때문에, 텍스트 시퀀스를 단어로 나누는 것과 같이 분할할 수 없습니다. 그래서 *양자화 모듈(quantization module)*로 전달되는 특징 벡터는 이산형 음성 단위를 학습하기 위한 것입니다. 음성 단위는 *코드북(codebook)*(어휘집이라고 생각할 수 있습니다)이라는 코드단어(codewords) 콜렉션에서 선택됩니다. 코드북에서 연속적인 오디오 입력을 가장 잘 나타내는 벡터 또는 음성 단위가 선택되어 모델을 통과합니다. 3. 특징 벡터의 절반은 무작위로 마스크가 적용되며, 마스크된 특징 벡터는 *상대적 위치 임베딩*을 추가하는 Transformer 인코더인 *문맥 네트워크(context network)*로 전달됩니다. 4. 문맥 네트워크의 사전훈련 목표는 *대조적 작업(contrastive task)*입니다. 모델은 잘못된 예측 시퀀스에서 마스크된 예측의 실제 양자화된 음성 표현을 예측하며, 모델이 가장 유사한 컨텍스트 벡터와 양자화된 음성 단위(타겟 레이블)를 찾도록 권장합니다. 이제 wav2vec2가 사전훈련되었으므로, 오디오 분류 또는 자동 음성 인식을 위해 데이터에 맞춰 미세 조정할 수 있습니다! ### 오디오 분류[[audio-classification]] 사전훈련된 모델을 오디오 분류에 사용하려면, 기본 Wav2Vec2 모델 상단에 시퀀스 분류 헤드를 추가하면 됩니다. 분류 헤드는 인코더의 은닉 상태(hidden states)를 받는 선형 레이어입니다. 은닉 상태는 각각 길이가 다른 오디오 프레임에서 학습된 특징을 나타냅니다. 고정 길이의 벡터 하나를 만들기 위해, 은닉 상태는 먼저 풀링되고, 클래스 레이블에 대한 로짓으로 변환됩니다. 가장 가능성이 높은 클래스를 찾기 위해 로짓과 타겟 사이의 교차 엔트로피 손실이 계산됩니다. 오디오 분류에 직접 도전할 준비가 되셨나요? 완전한 [오디오 분류 가이드](tasks/audio_classification)를 확인하여 Wav2Vec2를 미세 조정하고 추론에 사용하는 방법을 학습하세요! 
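이 흐름을 코드로 확인해 보고 싶다면 아래의 간단한 스케치를 참고하세요. 체크포인트와 더미 입력 파형은 설명을 위해 가정한 것이며, 실제로는 위에 링크된 오디오 분류 가이드처럼 데이터셋의 오디오 배열을 사용합니다.

```py
import torch
from transformers import AutoFeatureExtractor, AutoModelForAudioClassification

# 설명을 위해 가정한 오디오 분류 체크포인트입니다.
checkpoint = "superb/wav2vec2-base-superb-ks"
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
model = AutoModelForAudioClassification.from_pretrained(checkpoint)

# 16kHz로 샘플링된 1초 분량의 더미 파형입니다.
waveform = torch.zeros(16000).numpy()
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # 풀링된 은닉 상태 위의 분류 헤드가 출력한 로짓

predicted_label = model.config.id2label[logits.argmax(-1).item()]
```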
### 자동 음성 인식[[automatic-speech-recognition]] 사전훈련된 모델을 자동 음성 인식에 사용하려면, [연결주의적 시간 분류(CTC, Connectionist Temporal Classification)](glossary#connectionist-temporal-classification-ctc)를 위해 기본 Wav2Vec2 모델 상단에 언어 모델링 헤드를 추가합니다. 언어 모델링 헤드는 인코더의 은닉 상태를 받아서 로짓으로 변환합니다. 각 로짓은 토큰 클래스(토큰 수는 작업의 어휘에서 나타납니다)를 나타냅니다. CTC 손실은 텍스트로 디코딩된 토큰에서 가장 가능성이 높은 토큰 시퀀스를 찾기 위해 로짓과 타겟 사이에서 계산됩니다. 자동 음성 인식에 직접 도전할 준비가 되셨나요? 완전한 [자동 음성 인식 가이드](tasks/asr)를 확인하여 Wav2Vec2를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ## 컴퓨터 비전[[computer-vision]] 컴퓨터 비전 작업에 접근하는 2가지 방법이 있습니다: 1. 이미지를 패치 시퀀스로 분리하고 Transformer로 병렬 처리합니다. 2. [ConvNeXT](model_doc/convnext)와 같은 현대 CNN을 사용합니다. 이는 합성곱 레이어를 기반으로 하지만 현대 네트워크 설계를 적용합니다. <Tip> 세 번째 방법은 Transformer와 합성곱(예를 들어, [Convolutional Vision Transformer](model_doc/cvt) 또는 [LeViT](model_doc/levit))을 결합하는 것입니다. 우리는 살펴볼 두 가지 방법만 결합하기 때문에 여기서 이 방법을 다루지 않습니다. </Tip> ViT와 ConvNeXT는 일반적으로 이미지 분류에서 사용되지만, 물체 감지, 분할, 깊이 추정과 같은 다른 비전 작업에는 각각 DETR, Mask2Former, GLPN이 더 적합하므로 이러한 모델을 살펴보겠습니다. ### 이미지 분류[[image-classification]] ViT와 ConvNeXT 모두 이미지 분류에 사용될 수 있지만, ViT는 어텐션 메커니즘을, ConvNeXT는 합성곱을 사용하는 것이 주된 차이입니다. #### Transformer[[transformer]] [ViT](model_doc/vit)은 합성곱을 전적으로 순수 Transformer 아키텍처로 대체합니다. 기존 Transformer에 익숙하다면, ViT를 이해하는 방법의 대부분을 이미 파악했다고 볼 수 있습니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg"/> </div> ViT가 도입한 주요 변경 사항은 이미지가 Transformer로 어떻게 전달되는지에 있습니다: 1. 이미지는 서로 중첩되지 않는 정사각형 패치로 분할되고, 각 패치는 벡터 또는 *패치 임베딩(patch embedding)*으로 변환됩니다. 패치 임베딩은 적절한 입력 차원을 만드는 2D 합성곱 계층에서 생성됩니다(기본 Transformer의 경우 각 패치의 임베딩마다 768개의 값이 필요합니다). 224x224 픽셀 이미지가 있다면, 16x16 이미지 패치 196개로 분할할 수 있습니다. 텍스트가 단어로 토큰화되는 것처럼, 이미지도 패치 시퀀스로 "토큰화"됩니다. 2. *학습 가능한 임베딩(learnable embedding)*(특수한 `[CLS]` 토큰)이 BERT와 같이 패치 임베딩의 시작 부분에 추가됩니다. `[CLS]` 토큰의 마지막 은닉 상태는 부착된 분류 헤드의 입력으로 사용되고, 다른 출력은 무시됩니다. 이 토큰은 모델이 이미지의 표현을 인코딩하는 방법을 학습하는 데 도움이 됩니다. 3. 패치와 학습 가능한 임베딩에 마지막으로 추가할 것은 *위치 임베딩*입니다. 왜냐하면 모델은 이미지 패치의 순서를 모르기 때문입니다. 위치 임베딩도 학습 가능하며, 패치 임베딩과 동일한 크기를 가집니다. 최종적으로, 모든 임베딩이 Transformer 인코더에 전달됩니다. 4. `[CLS]` 토큰을 포함한 출력은 다층 퍼셉트론 헤드(MLP)에 전달됩니다. ViT의 사전훈련 목표는 단순히 분류입니다. 다른 분류 헤드와 같이, MLP 헤드는 출력을 클래스 레이블에 대해 로짓으로 변환하고 교차 엔트로피 손실을 계산하여 가장 가능성이 높은 클래스를 찾습니다. 이미지 분류에 직접 도전할 준비가 되셨나요? 완전한 [이미지 분류 가이드](tasks/image_classification)를 확인하여 ViT를 미세 조정하고 추론에 사용하는 방법을 학습하세요! #### CNN[[cnn]] <Tip> 이 섹션에서는 합성곱에 대해 간략하게 설명합니다. 그러나 이미지의 모양과 크기가 어떻게 변화하는지에 대한 사전 이해가 있다면 도움이 될 것입니다. 합성곱에 익숙하지 않은 경우, fastai book의 [합성곱 신경망 챕터](https://github.com/fastai/fastbook/blob/master/13_convolutions.ipynb)를 확인하세요! </Tip> [ConvNeXT](model_doc/convnext)는 성능을 높이기 위해 새로운 현대 네트워크 설계를 적용한 CNN 구조입니다. 그러나 합성곱은 여전히 모델의 핵심입니다. 높은 수준의 관점에서 볼 때, [합성곱](glossary#convolution)은 작은 행렬(*커널*)에 이미지 픽셀의 작은 윈도우를 곱하는 연산입니다. 이는 특정 텍스쳐(texture)이나 선의 곡률과 같은 일부 특징을 계산합니다. 그러고 다음 픽셀 윈도우로 넘어가는데, 여기서 합성곱이 이동하는 거리를 *보폭(stride)*이라고 합니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convolution.gif"/> </div> <small>패딩이나 보폭이 없는 기본 합성곱, <a href="https://arxiv.org/abs/1603.07285">딥러닝을 위한 합성곱 연산 가이드</a></small> 이 출력을 다른 합성곱 레이어에 전달할 수 있으며, 각 연속적인 레이어를 통해 네트워크는 핫도그나 로켓과 같이 더 복잡하고 추상적인 것을 학습합니다. 합성곱 레이어 사이에 풀링 레이어를 추가하여 차원을 줄이고 특징의 위치 변화에 대해 모델을 더 견고하게 만드는 것이 일반적입니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnext_architecture.png"/> </div> ConvNeXT는 CNN을 5가지 방식으로 현대화합니다: 1. 각 단계의 블록 수를 변경하고 더 큰 보폭과 그에 대응하는 커널 크기로 이미지를 "패치화(patchify)"합니다. 
겹치지 않는 슬라이딩 윈도우는 ViT가 이미지를 패치로 분할하는 방법과 유사하게 이 패치화 전략을 만듭니다. 2. *병목(bottleneck)* 레이어는 채널 수를 줄였다가 다시 복원합니다. 왜냐하면 1x1 합성곱을 수행하는 것이 더 빠르고, 깊이를 늘릴 수 있기 때문입니다. 역 병목(inverted bottlenect)은 채널 수를 확장하고 축소함으로써 그 반대로 수행하므로, 메모리 효율이 더 높습니다. 3. 병목 레이어의 일반적인 3x3 합성곱 레이어를 각 입력 채널에 개별적으로 합성곱을 적용한 다음 마지막에 쌓는 *깊이별 합성곱(depthwise convolution)*으로 대체합니다. 이는 네트워크 폭이 넓혀 성능이 향상됩니다. 4. ViT는 어텐션 메커니즘 덕분에 한 번에 더 많은 이미지를 볼 수 있는 전역 수신 필드를 가지고 있습니다. ConvNeXT는 커널 크기를 7x7로 늘려 이 효과를 재현하려고 시도합니다. 5. 또한 ConvNeXT는 Transformer 모델을 모방하는 몇 가지 레이어 설계를 변경합니다. 활성화 및 정규화 레이어가 더 적고, 활성화 함수가 ReLU 대신 GELU로 전환되고, BatchNorm 대신 LayerNorm을 사용합니다. 합성곱 블록의 출력은 분류 헤드로 전달되며, 분류 헤드는 출력을 로짓으로 변환하고 교차 엔트로피 손실을 계산하여 가장 가능성이 높은 레이블을 찾습니다. ### 객체 탐지[[object-detection]] [DETR](model_doc/detr), *DEtection TRansformer*는 CNN과 Transformer 인코더-디코더를 결합한 종단간(end-to-end) 객체 탐지 모델입니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/detr_architecture.png"/> </div> 1. 사전훈련된 CNN *백본(backbone)*은 픽셀 값으로 나타낸 이미지를 가져와 저해상도 특징 맵을 만듭니다. 특징 맵에 대해 1x1 합성곱을 적용하여 차원을 줄이고, 고수준 이미지 표현을 가진 새로운 특징 맵을 생성합니다. Transformer는 시퀀스 모델이기 때문에 특징 맵을 위치 임베딩과 결합된 특징 벡터의 시퀀스로 평탄화합니다. 2. 특징 벡터는 어텐션 레이어를 사용하여 이미지 표현을 학습하는 인코더에 전달됩니다. 다음으로, 인코더의 은닉 상태는 디코더에서 *객체 쿼리*와 결합됩니다. 객체 쿼리는 이미지의 다른 영역에 초점을 맞춘 학습된 임베딩으로 학습되고, 각 어텐션 레이어를 진행하면서 갱신됩니다. 디코더의 은닉 상태는 각 객체 쿼리에 대한 바운딩 박스 좌표와 클래스 레이블을 예측하는 순방향 네트워크에 전달되며, 객체가 없는 경우 `no object`가 출력됩니다. DETR은 각 객체 쿼리를 병렬로 디코딩하여 *N* 개의 최종 예측을 출력합니다. 여기서 *N*은 쿼리 수입니다. 한 번에 하나의 요소를 예측하는 일반적인 자기회귀 모델과 달리, 객체 탐지는 한 번에 *N* 개의 예측을 수행하는 집합 예측 작업(`바운딩 박스`, `클래스 레이블`)입니다. 3. DETR은 훈련 중 *이분 매칭 손실(bipartite matching loss)*을 사용하여 고정된 수의 예측과 고정된 실제 정답 레이블(ground truth labels) 세트를 비교합니다. *N*개의 레이블 세트에 실제 정답 레이블보다 적은 경우, `no object` 클래스로 패딩됩니다. 이 손실 함수는 DETR이 예측과 실제 정답 레이블 간 1:1 대응을 찾도록 권장합니다. 바운딩 박스 또는 클래스 레이블 중 하나라도 잘못된 경우, 손실이 발생합니다. 마찬가지로, 존재하지 않는 객체를 예측하는 경우, 패널티를 받습니다. 이로 인해 DETR은 이미지에서 눈에 잘 띄는 물체 하나에 집중하는 대신, 다른 객체를 찾도록 권장됩니다. 객체 탐지 헤드가 DETR 상단에 추가되어 클래스 레이블과 바운딩 박스의 좌표를 찾습니다. 객체 탐지 헤드에는 두 가지 구성 요소가 있습니다: 디코더 은닉 상태를 클래스 레이블의 로짓으로 변환하는 선형 레이어 및 바운딩 박스를 예측하는 MLP 객체 탐지에 직접 도전할 준비가 되셨나요? 완전한 [객체 탐지 가이드](tasks/object_detection)를 확인하여 DETR을 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 이미지 분할[[image-segmentation]] [Mask2Former](model_doc/mask2former)는 모든 유형의 이미지 분할 작업을 해결하는 범용 아키텍처입니다. 전통적인 분할 모델은 일반적으로 시멘틱(semantic) 또는 파놉틱(panoptic) 분할과 같은 이미지 분할의 특정 하위 작업에 맞춰 조정됩니다. Mask2Former는 모든 작업을 *마스크 분류* 문제로 구성합니다. 마스크 분류는 픽셀을 *N*개 세그먼트로 그룹화하고, 주어진 이미지에 대해 *N*개의 마스크와 그에 대응하는 클래스 레이블을 예측합니다. 이 섹션에서 Mask2Former의 작동 방법을 설명한 다음, 마지막에 SegFormer를 미세 조정해볼 수 있습니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/mask2former_architecture.png"/> </div> Mask2Former에는 3가지 주요 구성 요소가 있습니다: 1. [Swin](model_doc/swin) 백본이 이미지를 받아 3개의 연속된 3x3 합성곱에서 저해상도 이미지 특징 맵을 생성합니다. 2. 특징 맵은 *픽셀 디코더*에 전달됩니다. 이 디코더는 저해상도 특징을 고해상도 픽셀 임베딩으로 점진적으로 업샘플링합니다. 픽셀 디코더는 실제로 원본 이미지의 1/32, 1/16, 1/8 해상도의 다중 스케일 특징(저해상도 및 고해상도 특징 모두 포함)을 생성합니다. 3. 이러한 서로 다른 크기의 특징 맵은 고해상도 특징에서 작은 객체를 포착하기 위해 한 번에 하나의 Transformer 디코더 레이어에 연속적으로 공급됩니다. Mask2Former의 핵심은 디코더의 *마스크 어텐션* 메커니즘입니다. 전체 이미지를 참조할 수 있는 크로스 어텐션(cross-attention)과 달리, 마스크 어텐션은 이미지의 특정 영역에만 집중합니다. 이는 이미지의 지역적 특징만으로 모델이 충분히 학습할 수 있기 때문에 더 빠르고 성능이 우수합니다. 4. [DETR](tasks_explained#object-detection)과 같이, Mask2Former는 학습된 객체 쿼리를 사용하고 이를 픽셀 디코더에서의 이미지 특징과 결합하여 예측 집합(`클래스 레이블`, `마스크 예측`)을 생성합니다. 디코더의 은닉 상태는 선형 레이어로 전달되어 클래스 레이블에 대한 로짓으로 변환됩니다. 로짓과 클래스 레이블 사이의 교차 엔트로피 손실을 계산하여 가장 가능성이 높은 것을 찾습니다. 마스크 예측은 픽셀 임베딩과 최종 디코더 은닉 상태를 결합하여 생성됩니다. 
시그모이드 교차 엔트로피 및 Dice 손실은 로짓과 실제 정답 마스크(ground truth mask) 사이에서 계산되어 가장 가능성이 높은 마스크를 찾습니다. 이미지 분할에 직접 도전할 준비가 되셨나요? 완전한 [이미지 분할 가이드](tasks/semantic_segmentation)를 확인하여 SegFormer를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 깊이 추정[[depth-estimation]] [GLPN](model_doc/glpn), *Global-Local Path Network*는 [SegFormer](model_doc/segformer) 인코더와 경량 디코더를 결합한 깊이 추정을 위한 Transformer입니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/glpn_architecture.jpg"/> </div> 1. ViT와 같이, 이미지는 패치 시퀀스로 분할되지만, 이미지 패치가 더 작다는 점이 다릅니다. 이는 세그멘테이션이나 깊이 추정과 같은 밀도 예측 작업에 더 적합합니다. 이미지 패치는 패치 임베딩으로 변환되어(패치 임베딩이 생성되는 방법은 [이미지 분류](#image-classification) 섹션을 참조하세요), 인코더로 전달됩니다. 2. 인코더는 패치 임베딩을 받아, 여러 인코더 블록에 전달합니다. 각 블록은 어텐션 및 Mix-FFN 레이어로 구성됩니다. 후자의 목적은 위치 정보를 제공하는 것입니다. 각 인코더 블록의 끝에는 계층적 표현을 생성하기 위한 *패치 병합(patch merging)* 레이어가 있습니다. 각 인접한 패치 그룹의 특징은 연결되고, 연결된 특징에 선형 레이어가 적용되어 패치 수를 1/4의 해상도로 줄입니다. 이는 다음 인코더 블록의 입력이 되며, 이러한 전체 프로세스는 1/8, 1/16, 1/32 해상도의 이미지 특징을 가질 때까지 반복됩니다. 3. 경량 디코더는 인코더에서 마지막 특징 맵(1/32 크기)을 가져와 1/16 크기로 업샘플링합니다. 여기서, 특징은 *선택적 특징 융합(SFF, Selective Feature Fusion)* 모듈로 전달됩니다. 이 모듈은 각 특징에 대해 어텐션 맵에서 로컬 및 전역 특징을 선택하고 결합한 다음, 1/8로 업샘플링합니다. 이 프로세스는 디코딩된 특성이 원본 이미지와 동일한 크기가 될 때까지 반복됩니다. 출력은 두 개의 합성곱 레이어를 거친 다음, 시그모이드 활성화가 적용되어 각 픽셀의 깊이를 예측합니다. ## 자연어처리[[natural-language-processing]] Transformer는 초기에 기계 번역을 위해 설계되었고, 그 이후로는 사실상 모든 NLP 작업을 해결하기 위한 기본 아키텍처가 되었습니다. 어떤 작업은 Transformer의 인코더 구조에 적합하며, 다른 작업은 디코더에 더 적합합니다. 또 다른 작업은 Transformer의 인코더-디코더 구조를 모두 활용합니다. ### 텍스트 분류[[text-classification]] [BERT](model_doc/bert)는 인코더 전용 모델이며, 텍스트의 풍부한 표현을 학습하기 위해 양방향의 단어에 주목함으로써 심층 양방향성(deep bidirectionality)을 효과적으로 구현한 최초의 모델입니다. 1. BERT는 [WordPiece](tokenizer_summary#wordpiece) 토큰화를 사용하여 문장의 토큰 임베딩을 생성합니다. 단일 문장과 한 쌍의 문장을 구분하기 위해 특수한 `[SEP]` 토큰이 추가됩니다. 모든 텍스트 시퀀스의 시작 부분에는 특수한 `[CLS]` 토큰이 추가됩니다. `[CLS]` 토큰이 있는 최종 출력은 분류 작업을 위한 분류 헤드로 입력에 사용됩니다. BERT는 또한 한 쌍의 문장에서 각 토큰이 첫 번째 문장인지 두 번째 문장에 속하는지 나타내는 세그먼트 임베딩(segment embedding)을 추가합니다. 2. BERT는 마스크드 언어 모델링과 다음 문장 예측, 두 가지 목적으로 사전훈련됩니다. 마스크드 언어 모델링에서는 입력 토큰의 일부가 무작위로 마스킹되고, 모델은 이를 예측해야 합니다. 이는 모델이 모든 단어를 보고 다음 단어를 "예측"할 수 있는 양방향성 문제를 해결합니다. 예측된 마스크 토큰의 최종 은닉 상태는 어휘에 대한 소프트맥스가 있는 순방향 네트워크로 전달되어 마스크된 단어를 예측합니다. 두 번째 사전훈련 대상은 다음 문장 예측입니다. 모델은 문장 B가 문장 A 다음에 오는지 예측해야 합니다. 문장 B가 다음 문장인 경우와 무작위 문장인 경우 각각 50%의 확률로 발생합니다. 다음 문장인지 아닌지에 대한 예측은 두 개의 클래스(`IsNext` 및 `NotNext`)에 대한 소프트맥스가 있는 순방향 네트워크로 전달됩니다. 3. 입력 임베딩은 여러 인코더 레이어를 거쳐서 최종 은닉 상태를 출력합니다. 사전훈련된 모델을 텍스트 분류에 사용하려면, 기본 BERT 모델 상단에 시퀀스 분류 헤드를 추가합니다. 시퀀스 분류 헤드는 최종 은닉 상태를 받는 선형 레이어이며, 로짓으로 변환하기 위해 선형 변환을 수행합니다. 교차 엔트로피 손실은 로짓과 타겟 간에 계산되어 가장 가능성이 높은 레이블을 찾습니다. 텍스트 분류에 직접 도전할 준비가 되셨나요? 완전한 [텍스트 분류 가이드](tasks/sequence_classification)를 확인하여 DistilBERT를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 토큰 분류[[token-classification]] 개체명 인식(Named Entity Recognition, NER)과 같은 토큰 분류 작업에 BERT를 사용하려면, 기본 BERT 모델 상단에 토큰 분류 헤드를 추가합니다. 토큰 분류 헤드는 최종 은닉 상태를 받는 선형 레이어이며, 로짓으로 변환하기 위해 선형 변환을 수행합니다. 교차 엔트로피 손실은 로짓과 각 토큰 간에 계산되어 가장 가능성이 높은 레이블을 찾습니다. 토큰 분류에 직접 도전할 준비가 되셨나요? 완전한 [토큰 분류 가이드](tasks/token_classification)를 확인하여 DistilBERT를 미세 조정하고 추론에 사용하는 방법을 학습하세요! ### 질의응답[[question-answering]] 질의응답에 BERT를 사용하려면, 기본 BERT 모델 위에 스팬(span) 분류 헤드를 추가합니다. 이 선형 레이어는 최종 은닉 상태를 받고, 답변에 대응하는 `스팬`의 시작과 끝 로그를 계산하기 위해 선형 변환을 수행합니다. 교차 엔트로피 손실은 로짓과 각 레이블 위치 간에 계산되어 답변에 대응하는 가장 가능성이 높은 텍스트의 스팬을 찾습니다. 질의응답에 직접 도전할 준비가 되셨나요? 완전한 [질의응답 가이드](tasks/question_answering)를 확인하여 DistilBERT를 미세 조정하고 추론에 사용하는 방법을 학습하세요! <Tip> 💡 사전훈련된 BERT를 다양한 작업에 사용하는 것이 얼마나 쉬운지 주목하세요. 사전훈련된 모델에 특정 헤드를 추가하기만 하면 은닉 상태를 원하는 출력으로 조작할 수 있습니다! 
</Tip> ### 텍스트 생성[[text-generation]] [GPT-2](model_doc/gpt2)는 대량의 텍스트에 대해 사전훈련된 디코딩 전용 모델입니다. 프롬프트를 주어지면 설득력 있는 (항상 사실은 아니지만!) 텍스트를 생성하고 명시적으로 훈련되지 않았음에도 불구하고 질의응답과 같은 다른 NLP 작업을 완수할 수 있습니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/gpt2_architecture.png"/> </div> 1. GPT-2는 단어를 토큰화하고 토큰 임베딩을 생성하기 위해 [바이트 페어 인코딩(BPE, byte pair encoding)](tokenizer_summary#bytepair-encoding-bpe)을 사용합니다. 위치 인코딩은 시퀀스에서 각 토큰의 위치를 나타내기 위해 토큰 임베딩에 추가됩니다. 입력 임베딩은 여러 디코더 블록을 거쳐 일부 최종 은닉 상태를 출력합니다. 각 디코더 블록 내에서 GPT-2는 *마스크드 셀프 어텐션(masked self-attention)* 레이어를 사용합니다. 이는 GPT-2가 이후 토큰(future tokens)에 주의를 기울일 수 없도록 합니다. 왼쪽에 있는 토큰에만 주의를 기울일 수 있습니다. 마스크드 셀프 어텐션에서는 어텐션 마스크를 사용하여 이후 토큰에 대한 점수(score)를 `0`으로 설정하기 때문에 BERT의 [`mask`] 토큰과 다릅니다. 2. 디코더의 출력은 언어 모델링 헤드에 전달되며, 언어 모델링 헤드는 은닉 상태를 로짓으로 선형 변환을 수행합니다. 레이블은 시퀀스의 다음 토큰으로, 로짓을 오른쪽으로 하나씩 이동하여 생성됩니다. 교차 엔트로피 손실은 이동된 로짓과 레이블 간에 계산되어 가장 가능성이 높은 다음 토큰을 출력합니다. GPT-2의 사전훈련 목적은 전적으로 [인과적 언어 모델링](glossary#causal-language-modeling)에 기반하여, 시퀀스에서 다음 단어를 예측하는 것입니다. 이는 GPT-2가 텍스트 생성에 관련된 작업에 특히 우수하도록 합니다. 텍스트 생성에 직접 도전할 준비가 되셨나요? 완전한 [인과적 언어 모델링 가이드](tasks/language_modeling#causal-language-modeling)를 확인하여 DistilGPT-2를 미세 조정하고 추론에 사용하는 방법을 학습하세요! <Tip> 텍스트 생성에 대한 자세한 내용은 [텍스트 생성 전략](generation_strategies) 가이드를 확인하세요! </Tip> ### 요약[[summarization]] [BART](model_doc/bart) 및 [T5](model_doc/t5)와 같은 인코더-디코더 모델은 요약 작업의 시퀀스-투-시퀀스 패턴을 위해 설계되었습니다. 이 섹션에서 BART의 작동 방법을 설명한 다음, 마지막에 T5를 미세 조정해볼 수 있습니다. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bart_architecture.png"/> </div> 1. BART의 인코더 아키텍처는 BERT와 매우 유사하며 텍스트의 토큰 및 위치 임베딩을 받습니다. BART는 입력을 변형시키고 디코더로 재구성하여 사전훈련됩니다. 특정 변형 기법이 있는 다른 인코더와는 달리, BART는 모든 유형의 변형을 적용할 수 있습니다. 그러나 *text infilling* 변형 기법이 가장 잘 작동합니다. Text Infiling에서는 여러 텍스트 스팬을 **단일** [`mask`] 토큰으로 대체합니다. 이는 모델이 마스크된 토큰을 예측해야 하고, 모델에 누락된 토큰의 수를 예측하도록 가르치기 때문에 중요합니다. 입력 임베딩과 마스크된 스팬이 인코더를 거쳐 최종 은닉 상태를 출력하지만, BERT와 달리 BART는 마지막에 단어를 예측하는 순방향 네트워크를 추가하지 않습니다. 2. 인코더의 출력은 디코더로 전달되며, 디코더는 인코더의 출력에서 마스크 토큰과 변형되지 않은 토큰을 예측해야 합니다. 이는 디코더가 원본 텍스트를 복원하는 데 도움이 되는 추가적인 문맥을 얻도록 합니다. 디코더의 출력은 언어 모델링 헤드에 전달되며, 언어 모델링 헤드는 은닉 상태를 로짓으로 선형 변환을 수행합니다. 교차 엔트로피 손실은 로짓과 토큰이 오른쪽으로 이동된 레이블 간에 계산됩니다. 요약에 직접 도전할 준비가 되셨나요? 완전한 [요약 가이드](tasks/summarization)를 확인하여 T5를 미세 조정하고 추론에 사용하는 방법을 학습하세요! <Tip> 텍스트 생성에 대한 자세한 내용은 [텍스트 생성 전략](generation_strategies) 가이드를 확인하세요! </Tip> ### 번역[[translation]] 번역은 시퀀스-투-시퀀스 작업의 또 다른 예로, [BART](model_doc/bart) 또는 [T5](model_doc/t5)와 같은 인코더-디코더 모델을 사용할 수 있습니다. 이 섹션에서 BART의 작동 방법을 설명한 다음, 마지막에 T5를 미세 조정해볼 수 있습니다. BART는 원천 언어를 타겟 언어로 디코딩할 수 있는 입력에 매핑하기 위해 무작위로 초기화된 별도의 인코더를 추가하여 번역에 적용합니다. 이 새로운 인코더의 임베딩은 원본 단어 임베딩 대신 사전훈련된 인코더로 전달됩니다. 원천 인코더는 모델 출력의 교차 엔트로피 손실로부터 원천 인코더, 위치 임베딩, 입력 임베딩을 갱신하여 훈련됩니다. 첫 번째 단계에서는 모델 파라미터가 고정되고, 두 번째 단계에서는 모든 모델 파라미터가 함께 훈련됩니다. BART는 이후 번역을 위해 다양한 언어로 사전훈련된 다국어 버전의 mBART로 확장되었습니다. 번역에 직접 도전할 준비가 되셨나요? 완전한 [번역 가이드](tasks/summarization)를 확인하여 T5를 미세 조정하고 추론에 사용하는 방법을 학습하세요! <Tip> 텍스트 생성에 대한 자세한 내용은 [텍스트 생성 전략](generation_strategies) 가이드를 확인하세요! </Tip>
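참고로, 위에서 설명한 요약과 번역 작업을 추론에 바로 사용하는 최소한의 파이프라인 스케치입니다. 여기서 사용한 체크포인트 이름(`t5-small`)과 입력 문장은 설명을 위한 가정일 뿐이며, 실제 미세 조정과 추론 방법은 위에 링크된 각 작업 가이드를 따르는 것이 좋습니다.

```py
from transformers import pipeline

# 요약: T5 계열 체크포인트를 사용하는 예시 (체크포인트 이름은 가정입니다)
summarizer = pipeline("summarization", model="t5-small")
summary = summarizer(
    "Transformers provide a unified API for pretrained models across text, vision and audio tasks.",
    max_length=20,
)
print(summary[0]["summary_text"])

# 번역: 영어 -> 프랑스어 (T5가 지원하는 번역 방향 중 하나)
translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("The encoder-decoder model translates the source sentence.")[0]["translation_text"])
```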
transformers/docs/source/ko/tasks_explained.md/0
{ "file_path": "transformers/docs/source/ko/tasks_explained.md", "repo_id": "transformers", "token_count": 25797 }
483
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Compartilhando modelos customizados A biblioteca 🤗 Transformers foi projetada para ser facilmente extensível. Cada modelo é totalmente codificado em uma determinada subpasta do repositório sem abstração, para que você possa copiar facilmente um arquivo de modelagem e ajustá-lo às suas necessidades. Se você estiver escrevendo um modelo totalmente novo, pode ser mais fácil começar do zero. Neste tutorial, mostraremos como escrever um modelo customizado e sua configuração para que possa ser usado com Transformers, e como você pode compartilhá-lo com a comunidade (com o código em que se baseia) para que qualquer pessoa possa usá-lo, mesmo se não estiver presente na biblioteca 🤗 Transformers. Ilustraremos tudo isso em um modelo ResNet, envolvendo a classe ResNet do [biblioteca timm](https://github.com/rwightman/pytorch-image-models) em um [`PreTrainedModel`]. ## Escrevendo uma configuração customizada Antes de mergulharmos no modelo, vamos primeiro escrever sua configuração. A configuração de um modelo é um objeto que terá todas as informações necessárias para construir o modelo. Como veremos na próxima seção, o modelo só pode ter um `config` para ser inicializado, então realmente precisamos que esse objeto seja o mais completo possível. Em nosso exemplo, pegaremos alguns argumentos da classe ResNet que podemos querer ajustar. Diferentes configurações nos dará os diferentes tipos de ResNets que são possíveis. Em seguida, apenas armazenamos esses argumentos, após verificar a validade de alguns deles. ```python from transformers import PretrainedConfig from typing import List class ResnetConfig(PretrainedConfig): model_type = "resnet" def __init__( self, block_type="bottleneck", layers: List[int] = [3, 4, 6, 3], num_classes: int = 1000, input_channels: int = 3, cardinality: int = 1, base_width: int = 64, stem_width: int = 64, stem_type: str = "", avg_down: bool = False, **kwargs, ): if block_type not in ["basic", "bottleneck"]: raise ValueError(f"`block_type` must be 'basic' or bottleneck', got {block_type}.") if stem_type not in ["", "deep", "deep-tiered"]: raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.") self.block_type = block_type self.layers = layers self.num_classes = num_classes self.input_channels = input_channels self.cardinality = cardinality self.base_width = base_width self.stem_width = stem_width self.stem_type = stem_type self.avg_down = avg_down super().__init__(**kwargs) ``` As três coisas importantes a serem lembradas ao escrever sua própria configuração são: - você tem que herdar de `PretrainedConfig`, - o `__init__` do seu `PretrainedConfig` deve aceitar quaisquer kwargs, - esses `kwargs` precisam ser passados para a superclasse `__init__`. 
A herança é para garantir que você obtenha todas as funcionalidades da biblioteca 🤗 Transformers, enquanto as outras duas restrições vêm do fato de um `PretrainedConfig` ter mais campos do que os que você está configurando. Ao recarregar um config com o método `from_pretrained`, esses campos precisam ser aceitos pelo seu config e então enviados para a superclasse. Definir um `model_type` para sua configuração (aqui `model_type="resnet"`) não é obrigatório, a menos que você queira registrar seu modelo com as classes automáticas (veja a última seção). Com isso feito, você pode facilmente criar e salvar sua configuração como faria com qualquer outra configuração de modelo da biblioteca. Aqui está como podemos criar uma configuração resnet50d e salvá-la: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d_config.save_pretrained("custom-resnet") ``` Isso salvará um arquivo chamado `config.json` dentro da pasta `custom-resnet`. Você pode então recarregar sua configuração com o método `from_pretrained`: ```py resnet50d_config = ResnetConfig.from_pretrained("custom-resnet") ``` Você também pode usar qualquer outro método da classe [`PretrainedConfig`], como [`~PretrainedConfig.push_to_hub`] para carregar diretamente sua configuração para o Hub. ## Escrevendo um modelo customizado Agora que temos nossa configuração ResNet, podemos continuar escrevendo o modelo. Na verdade, escreveremos dois: um que extrai os recursos ocultos de um lote de imagens (como [`BertModel`]) e um que é adequado para classificação de imagem (como [`BertForSequenceClassification`]). Como mencionamos antes, escreveremos apenas um wrapper solto do modelo para mantê-lo simples para este exemplo. A única coisa que precisamos fazer antes de escrever esta classe é um mapa entre os tipos de bloco e as classes de bloco reais. 
Então o modelo é definido a partir da configuração, passando tudo para a classe `ResNet`:

```py
from transformers import PreTrainedModel
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
from .configuration_resnet import ResnetConfig

BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}


class ResnetModel(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor):
        return self.model.forward_features(tensor)
```

Para o modelo que irá classificar as imagens, vamos apenas alterar o método forward:

```py
import torch


class ResnetModelForImageClassification(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor, labels=None):
        logits = self.model(tensor)
        if labels is not None:
            loss = torch.nn.functional.cross_entropy(logits, labels)
            return {"loss": loss, "logits": logits}
        return {"logits": logits}
```

Em ambos os casos, observe como herdamos de `PreTrainedModel` e chamamos a inicialização da superclasse com o `config` (um pouco parecido com quando você escreve um `torch.nn.Module`). A linha que define o `config_class` não é obrigatória, a menos que você deseje registrar seu modelo com as classes automáticas (consulte a última seção).

<Tip>

Se o seu modelo for muito semelhante a um modelo dentro da biblioteca, você poderá reutilizar a mesma configuração desse modelo.

</Tip>

Você pode fazer com que seu modelo retorne o que você quiser, porém retornar um dicionário como fizemos para `ResnetModelForImageClassification`, com a função de perda incluída quando os rótulos são passados, torna seu modelo diretamente utilizável dentro da classe [`Trainer`]. Você pode usar outro formato de saída, desde que esteja planejando usar seu próprio laço de treinamento ou outra biblioteca para treinamento.

Agora que temos nossa classe do modelo, vamos criar uma:

```py
resnet50d = ResnetModelForImageClassification(resnet50d_config)
```

Novamente, você pode usar qualquer um dos métodos do [`PreTrainedModel`], como [`~PreTrainedModel.save_pretrained`] ou [`~PreTrainedModel.push_to_hub`]. Usaremos o segundo na próxima seção e veremos como enviar os pesos e o código do nosso modelo. Mas primeiro, vamos carregar alguns pesos pré-treinados dentro do nosso modelo.

Em seu próprio caso de uso, você provavelmente estará treinando seu modelo customizado em seus próprios dados. Para este tutorial ser rápido, usaremos a versão pré-treinada do resnet50d. Como nosso modelo é apenas um wrapper em torno dele, será fácil transferir esses pesos:

```py
import timm

pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```

Agora vamos ver como ter certeza de que quando fazemos [`~PreTrainedModel.save_pretrained`] ou [`~PreTrainedModel.push_to_hub`], o código do modelo é salvo. 
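Antes de prosseguir, um esboço rápido e opcional para conferir que o wrapper funciona de ponta a ponta. O tensor de entrada abaixo é apenas ilustrativo (um lote com 1 imagem RGB de 224x224); em um caso real, você usaria um image processor para preparar as imagens.

```py
import torch

# Entrada fictícia apenas para verificação do fluxo de dados.
dummy_input = torch.randn(1, 3, 224, 224)

with torch.no_grad():
    outputs = resnet50d(dummy_input)

print(outputs["logits"].shape)  # esperado: torch.Size([1, 1000])
```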
## Enviando o código para o Hub <Tip warning={true}> Esta API é experimental e pode ter algumas pequenas alterações nas próximas versões. </Tip> Primeiro, certifique-se de que seu modelo esteja totalmente definido em um arquivo `.py`. Ele pode contar com importações relativas para alguns outros arquivos desde que todos os arquivos estejam no mesmo diretório (ainda não suportamos submódulos para este recurso). Para o nosso exemplo, vamos definir um arquivo `modeling_resnet.py` e um arquivo `configuration_resnet.py` em uma pasta no diretório de trabalho atual chamado `resnet_model`. O arquivo de configuração contém o código para `ResnetConfig` e o arquivo de modelagem contém o código do `ResnetModel` e `ResnetModelForImageClassification`. ``` . └── resnet_model ├── __init__.py ├── configuration_resnet.py └── modeling_resnet.py ``` O `__init__.py` pode estar vazio, apenas está lá para que o Python detecte que o `resnet_model` possa ser usado como um módulo. <Tip warning={true}> Se estiver copiando arquivos de modelagem da biblioteca, você precisará substituir todas as importações relativas na parte superior do arquivo para importar do pacote `transformers`. </Tip> Observe que você pode reutilizar (ou subclasse) uma configuração/modelo existente. Para compartilhar seu modelo com a comunidade, siga estas etapas: primeiro importe o modelo ResNet e a configuração do arquivos criados: ```py from resnet_model.configuration_resnet import ResnetConfig from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification ``` Então você tem que dizer à biblioteca que deseja copiar os arquivos de código desses objetos ao usar o `save_pretrained` e registrá-los corretamente com uma determinada classe automáticas (especialmente para modelos), basta executar: ```py ResnetConfig.register_for_auto_class() ResnetModel.register_for_auto_class("AutoModel") ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification") ``` Observe que não há necessidade de especificar uma classe automática para a configuração (há apenas uma classe automática, [`AutoConfig`]), mas é diferente para os modelos. Seu modelo customizado pode ser adequado para muitas tarefas diferentes, então você tem que especificar qual das classes automáticas é a correta para o seu modelo. Em seguida, vamos criar a configuração e os modelos como fizemos antes: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d = ResnetModelForImageClassification(resnet50d_config) pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` Agora para enviar o modelo para o Hub, certifique-se de estar logado. Ou execute no seu terminal: ```bash huggingface-cli login ``` ou a partir do notebook: ```py from huggingface_hub import notebook_login notebook_login() ``` Você pode então enviar para seu próprio namespace (ou uma organização da qual você é membro) assim: ```py resnet50d.push_to_hub("custom-resnet50d") ``` Além dos pesos do modelo e da configuração no formato json, isso também copiou o modelo e configuração `.py` na pasta `custom-resnet50d` e carregou o resultado para o Hub. Você pode conferir o resultado neste [repositório de modelos](https://huggingface.co/sgugger/custom-resnet50d). Consulte o [tutorial de compartilhamento](model_sharing) para obter mais informações sobre o método push_to_hub. 
## Usando um modelo com código customizado

Você pode usar qualquer configuração, modelo ou tokenizador com arquivos de código customizados em seu repositório com as classes automáticas e o método `from_pretrained`. Todos os arquivos e códigos carregados no Hub são verificados quanto a malware (consulte a documentação de [Segurança do Hub](https://huggingface.co/docs/hub/security#malware-scanning) para obter mais informações), mas você ainda deve revisar o código do modelo e o autor para evitar a execução de código malicioso em sua máquina. Defina `trust_remote_code=True` para usar um modelo com código customizado:

```py
from transformers import AutoModelForImageClassification

model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)
```

Também é fortemente recomendado passar um hash de commit como `revision`, para garantir que o autor do modelo não atualize o código com novas linhas maliciosas (a menos que você confie totalmente nos autores do modelo).

```py
commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292"
model = AutoModelForImageClassification.from_pretrained(
    "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash
)
```

Observe que, ao navegar no histórico de commits do repositório do modelo no Hub, há um botão para copiar facilmente o commit hash de qualquer commit.

## Registrando um modelo com código customizado para as classes automáticas

Se você estiver escrevendo uma biblioteca que estende 🤗 Transformers, talvez queira estender as classes automáticas para incluir seus próprios modelos. Isso é diferente de enviar o código para o Hub no sentido de que os usuários precisarão importar sua biblioteca para obter os modelos customizados (ao contrário de baixar automaticamente o código do modelo do Hub).

Desde que sua configuração tenha um atributo `model_type` diferente dos tipos de modelo existentes e que as classes do seu modelo tenham os atributos `config_class` corretos, você pode simplesmente adicioná-las às classes automáticas assim:

```py
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification

AutoConfig.register("resnet", ResnetConfig)
AutoModel.register(ResnetConfig, ResnetModel)
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
```

Observe que o primeiro argumento usado ao registrar sua configuração customizada em [`AutoConfig`] precisa corresponder ao `model_type` de sua configuração customizada. E o primeiro argumento usado ao registrar seus modelos customizados, em qualquer classe de modelo automática, deve corresponder ao `config_class` desses modelos.
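Como referência, um esboço mínimo de como as classes automáticas passam a resolver o modelo customizado depois do registro acima. Ele assume que o registro foi feito na mesma sessão Python e que a configuração e os pesos já foram salvos em um diretório local chamado `custom-resnet50d` (por exemplo, com `save_pretrained`); o nome do diretório é apenas ilustrativo.

```py
from transformers import AutoConfig, AutoModelForImageClassification

# Depois de AutoConfig.register("resnet", ResnetConfig), o model_type "resnet"
# gravado no config.json resolve para ResnetConfig.
config = AutoConfig.from_pretrained("custom-resnet50d")

# E, como ResnetModelForImageClassification foi registrado com ResnetConfig,
# a classe automática instancia diretamente a classe customizada.
model = AutoModelForImageClassification.from_pretrained("custom-resnet50d")
```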
transformers/docs/source/pt/custom_models.md/0
{ "file_path": "transformers/docs/source/pt/custom_models.md", "repo_id": "transformers", "token_count": 5915 }
484
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Examples We host a wide range of example scripts for multiple learning frameworks. Simply choose your favorite: [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch) or [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). We also have some [research projects](https://github.com/huggingface/transformers/tree/main/examples/research_projects), as well as some [legacy examples](https://github.com/huggingface/transformers/tree/main/examples/legacy). Note that unlike the main examples these are not actively maintained, and may require specific older versions of dependencies in order to run. While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the-box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data, allowing you to tweak and edit them as required. Please discuss on the [forum](https://discuss.huggingface.co/) or in an [issue](https://github.com/huggingface/transformers/issues) a feature you would like to implement in an example before submitting a PR; we welcome bug fixes, but since we want to keep the examples as simple as possible it's unlikely that we will merge a pull request adding more functionality at the cost of readability. ## Important note **Important** To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install . 
``` Then cd in the example folder of your choice and run ```bash pip install -r requirements.txt ``` To browse the examples corresponding to released versions of 🤗 Transformers, click on the line below and then on your desired version of the library: <details> <summary>Examples for older versions of 🤗 Transformers</summary> <ul> <li><a href="https://github.com/huggingface/transformers/tree/v4.21.0/examples">v4.21.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.20.1/examples">v4.20.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.19.4/examples">v4.19.4</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.18.0/examples">v4.18.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.17.0/examples">v4.17.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.16.2/examples">v4.16.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.15.0/examples">v4.15.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.14.1/examples">v4.14.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.13.0/examples">v4.13.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.12.5/examples">v4.12.5</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.11.3/examples">v4.11.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.10.3/examples">v4.10.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.9.2/examples">v4.9.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.8.2/examples">v4.8.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.7.0/examples">v4.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.6.1/examples">v4.6.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a 
href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li> </ul> </details> Alternatively, you can switch your cloned 🤗 Transformers to a specific version (for instance with v3.5.1) with ```bash git checkout tags/v3.5.1 ``` and run the example command as usual afterward. ## Running the Examples on Remote Hardware with Auto-Setup [run_on_remote.py](./run_on_remote.py) is a script that launches any example on remote self-hosted hardware, with automatic hardware and environment setup. It uses [Runhouse](https://github.com/run-house/runhouse) to launch on self-hosted hardware (e.g. in your own cloud account or on-premise cluster) but there are other options for running remotely as well. You can easily customize the example used, command line arguments, dependencies, and type of compute hardware, and then run the script to automatically launch the example. You can refer to [hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup) for more information about hardware and dependency setup with Runhouse, or this [Colab tutorial](https://colab.research.google.com/drive/1sh_aNQzJX5BKAdNeXthTNGxKz7sM9VPc) for a more in-depth walkthrough. You can run the script with the following commands: ```bash # First install runhouse: pip install runhouse # For an on-demand V100 with whichever cloud provider you have configured: python run_on_remote.py \ --example pytorch/text-generation/run_generation.py \ --model_type=gpt2 \ --model_name_or_path=openai-community/gpt2 \ --prompt "I am a language model and" # For byo (bring your own) cluster: python run_on_remote.py --host <cluster_ip> --user <ssh_user> --key_path <ssh_key_path> \ --example <example> <args> # For on-demand instances python run_on_remote.py --instance <instance> --provider <provider> \ --example <example> <args> ``` You can also adapt the script to your own needs.
transformers/examples/README.md/0
{ "file_path": "transformers/examples/README.md", "repo_id": "transformers", "token_count": 3290 }
485
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for question answering. """ # You can also adapt this script on your own question answering task. Pointers for this are left as comments. import json import logging import math import os import random import sys import time import warnings from dataclasses import asdict, dataclass, field from enum import Enum from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import evaluate import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import HfApi from tqdm import tqdm from utils_qa import postprocess_qa_predictions import transformers from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, FlaxAutoModelForQuestionAnswering, HfArgumentParser, PreTrainedTokenizerFast, is_tensorboard_available, ) from transformers.utils import check_min_version, send_example_telemetry logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.41.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset PRNGKey = Any # region Arguments @dataclass class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." 
) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
""" model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=384, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when" " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) version_2_with_negative: bool = field( default=False, metadata={"help": "If true, some of the examples do not have an answer."} ) null_score_diff_threshold: float = field( default=0.0, metadata={ "help": ( "The threshold used to select the null answer: if the best answer has a score that is less than " "the score of the null answer minus this threshold, the null answer is selected for this example. " "Only useful when `version_2_with_negative=True`." ) }, ) doc_stride: int = field( default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, ) n_best_size: int = field( default=20, metadata={"help": "The total number of n-best predictions to generate when looking for an answer."}, ) max_answer_length: int = field( default=30, metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) }, ) def __post_init__(self): if ( self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None ): raise ValueError("Need either a dataset name or a training/validation file/test_file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.test_file is not None: extension = self.test_file.split(".")[-1] assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." # endregion # region Create a train state def create_train_state( model: FlaxAutoModelForQuestionAnswering, learning_rate_fn: Callable[[int], float], num_labels: int, training_args: TrainingArguments, ) -> train_state.TrainState: """Create initial training state.""" class TrainState(train_state.TrainState): """Train state with an Optax optimizer. The two functions below differ depending on whether the task is classification or regression. Args: logits_fn: Applied to last layer to obtain the logits. loss_fn: Function to compute the loss. """ logits_fn: Callable = struct.field(pytree_node=False) loss_fn: Callable = struct.field(pytree_node=False) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. 
def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) tx = optax.adamw( learning_rate=learning_rate_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) def cross_entropy_loss(logits, labels): start_loss = optax.softmax_cross_entropy(logits[0], onehot(labels[0], num_classes=num_labels)) end_loss = optax.softmax_cross_entropy(logits[1], onehot(labels[1], num_classes=num_labels)) xentropy = (start_loss + end_loss) / 2.0 return jnp.mean(xentropy) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits, loss_fn=cross_entropy_loss, ) # endregion # region Create learning rate function def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.ndarray]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn # endregion # region train data iterator def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = jax.random.permutation(rng, len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: np.array(v) for k, v in batch.items()} batch = shard(batch) yield batch # endregion # region eval data iterator def eval_data_collator(dataset: Dataset, batch_size: int): """Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop.""" batch_idx = np.arange(len(dataset)) steps_per_epoch = math.ceil(len(dataset) / batch_size) batch_idx = np.array_split(batch_idx, steps_per_epoch) for idx in batch_idx: batch = dataset[idx] batch = {k: np.array(v) for k, v in batch.items()} yield batch # endregion def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_qa", model_args, data_args, framework="flax") # endregion # region Logging # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # endregion # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # region Load Data # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, ) else: # Loading the dataset from local csv or json file. data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. 
# endregion # region Load pretrained model and tokenizer # # Load pretrained model and tokenizer config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # endregion # region Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models at" " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" " this requirement" ) # endregion # region Preprocessing the datasets # Preprocessing is slightly different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: column_names = raw_datasets["test"].column_names question_column_name = "question" if "question" in column_names else column_names[0] context_column_name = "context" if "context" in column_names else column_names[1] answer_column_name = "answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). pad_on_right = tokenizer.padding_side == "right" if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. 
offset_mapping = tokenized_examples.pop("offset_mapping") # Let's label those examples! tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) return tokenized_examples processed_raw_datasets = {} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # We will select sample from whole data if argument is specified max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create train feature from dataset train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if data_args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) processed_raw_datasets["train"] = train_dataset # Validation preprocessing def prepare_validation_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). 
So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # We will select sample from whole data max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Validation Feature Creation eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if data_args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) processed_raw_datasets["validation"] = eval_dataset if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Predict Feature Creation predict_dataset = predict_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) processed_raw_datasets["test"] = predict_dataset # endregion # region Metrics and Post-processing: def post_processing_function(examples, features, predictions, stage="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. predictions = postprocess_qa_predictions( examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, prefix=stage, ) # Format the result to the format the metric expects. 
if data_args.version_2_with_negative: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = evaluate.load( "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir ) def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor def create_and_fill_np_array(start_or_end_logits, dataset, max_len): """ Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor Args: start_or_end_logits(:obj:`tensor`): This is the output predictions of the model. We can only enter either start or end logits. eval_dataset: Evaluation dataset max_len(:obj:`int`): The maximum length of the output tensor. ( See the model.eval() part for more details ) """ step = 0 # create a numpy array and fill it with -100. logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64) # Now since we have create an array now we will populate it with the outputs of the model. for i, output_logit in enumerate(start_or_end_logits): # populate columns # We have to fill it such that we have to take the whole tensor and replace it on the newly created array # And after every iteration we have to change the step batch_size = output_logit.shape[0] cols = output_logit.shape[1] if step + batch_size < len(dataset): logits_concat[step : step + batch_size, :cols] = output_logit else: logits_concat[step:, :cols] = output_logit[: len(dataset) - step] step += batch_size return logits_concat # endregion # region Training steps and logging init train_dataset = processed_raw_datasets["train"] eval_dataset = processed_raw_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Define a summary writer has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(training_args.output_dir) summary_writer.hparams({**training_args.to_dict(), **vars(model_args), **vars(data_args)}) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) num_epochs = int(training_args.num_train_epochs) rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) train_batch_size = int(training_args.per_device_train_batch_size) * jax.local_device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.local_device_count() # endregion # region Load model model = FlaxAutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), ) learning_rate_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) state = create_train_state(model, learning_rate_fn, num_labels=max_seq_length, training_args=training_args) # endregion # region Define train step functions def train_step( state: train_state.TrainState, batch: Dict[str, Array], dropout_rng: PRNGKey ) -> Tuple[train_state.TrainState, float]: """Trains model with an optimizer (both in `state`) on `batch`, returning a pair `(new_state, loss)`.""" dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) start_positions = batch.pop("start_positions") end_positions = batch.pop("end_positions") targets = (start_positions, end_positions) def loss_fn(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True) loss = state.loss_fn(logits, targets) return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean({"loss": loss, "learning_rate": learning_rate_fn(state.step)}, axis_name="batch") return new_state, metrics, new_dropout_rng p_train_step = jax.pmap(train_step, axis_name="batch", donate_argnums=(0,)) # endregion # region Define eval step functions def eval_step(state, batch): logits = state.apply_fn(**batch, params=state.params, train=False) return state.logits_fn(logits) p_eval_step = jax.pmap(eval_step, axis_name="batch") # endregion # region Define train and eval loop logger.info(f"===== Starting training ({num_epochs} epochs) =====") train_time = 0 # make sure weights are replicated on each device state = replicate(state) train_time = 0 step_per_epoch = len(train_dataset) // train_batch_size total_steps = step_per_epoch * num_epochs epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) for epoch in epochs: train_start = time.time() train_metrics = [] # Create sampling rng rng, input_rng = jax.random.split(rng) # train for step, batch in enumerate( tqdm( train_data_collator(input_rng, train_dataset, train_batch_size), total=step_per_epoch, desc="Training...", position=1, ), 1, ): state, train_metric, dropout_rngs = p_train_step(state, batch, dropout_rngs) train_metrics.append(train_metric) cur_step = epoch * step_per_epoch + step if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_metric = unreplicate(train_metric) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step}/{total_steps} | Training Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) train_metrics = [] if ( training_args.do_eval and (cur_step % training_args.eval_steps == 0 or cur_step % step_per_epoch == 0) and cur_step > 0 ): eval_metrics = {} all_start_logits = [] all_end_logits = [] # evaluate for batch in tqdm( eval_data_collator(eval_dataset, eval_batch_size), total=math.ceil(len(eval_dataset) / eval_batch_size), desc="Evaluating ...", position=2, ): _ = batch.pop("example_id") _ = batch.pop("offset_mapping") predictions = pad_shard_unpad(p_eval_step)( state, batch, min_device_batch=per_device_eval_batch_size ) start_logits = np.array(predictions[0]) end_logits = np.array(predictions[1]) all_start_logits.append(start_logits) all_end_logits.append(end_logits) max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor # concatenate the numpy array start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len) end_logits_concat = create_and_fill_np_array(all_end_logits, eval_dataset, max_len) # delete the list of numpy arrays del all_start_logits del all_end_logits outputs_numpy = (start_logits_concat, end_logits_concat) prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy) eval_metrics = compute_metrics(prediction) logger.info(f"Step... ({cur_step}/{total_steps} | Evaluation metrics: {eval_metrics})") if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metrics, cur_step) if (cur_step % training_args.save_steps == 0 and cur_step > 0) or (cur_step == total_steps): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(unreplicate(state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: api.upload_folder( commit_message=f"Saving weights and logs of step {cur_step}", folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) epochs.desc = f"Epoch ... 
{epoch + 1}/{num_epochs}" # endregion # Eval after training if training_args.do_eval: eval_metrics = {} all_start_logits = [] all_end_logits = [] eval_loader = eval_data_collator(eval_dataset, eval_batch_size) for batch in tqdm( eval_loader, total=math.ceil(len(eval_dataset) / eval_batch_size), desc="Evaluating ...", position=2 ): _ = batch.pop("example_id") _ = batch.pop("offset_mapping") predictions = pad_shard_unpad(p_eval_step)(state, batch, min_device_batch=per_device_eval_batch_size) start_logits = np.array(predictions[0]) end_logits = np.array(predictions[1]) all_start_logits.append(start_logits) all_end_logits.append(end_logits) max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor # concatenate the numpy array start_logits_concat = create_and_fill_np_array(all_start_logits, eval_dataset, max_len) end_logits_concat = create_and_fill_np_array(all_end_logits, eval_dataset, max_len) # delete the list of numpy arrays del all_start_logits del all_end_logits outputs_numpy = (start_logits_concat, end_logits_concat) prediction = post_processing_function(eval_examples, eval_dataset, outputs_numpy) eval_metrics = compute_metrics(prediction) if jax.process_index() == 0: eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()} path = os.path.join(training_args.output_dir, "eval_results.json") with open(path, "w") as f: json.dump(eval_metrics, f, indent=4, sort_keys=True) if __name__ == "__main__": main()
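The evaluation loops above collect per-batch start/end logits whose width varies with padding, then rely on `create_and_fill_np_array` to place them into a single `-100`-filled matrix before post-processing. A minimal, self-contained sketch of that padding-and-concatenation pattern (toy NumPy arrays only; the function name and shapes here are illustrative, not part of the script):

```python
import numpy as np


def concat_with_padding(batches, num_rows, max_len, pad_value=-100.0):
    """Copy variable-width logit batches into one pre-padded (num_rows, max_len) array."""
    out = np.full((num_rows, max_len), pad_value, dtype=np.float64)
    step = 0
    for batch in batches:
        rows, cols = batch.shape
        take = min(rows, num_rows - step)  # the last batch may be truncated, as in run_qa.py
        out[step : step + take, :cols] = batch[:take]
        step += rows
    return out


# Three toy "start logit" batches with different sequence lengths.
batches = [np.ones((2, 3)), 2 * np.ones((2, 5)), 3 * np.ones((1, 4))]
padded = concat_with_padding(batches, num_rows=5, max_len=5)
print(padded.shape)  # (5, 5); positions never written stay at -100
```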
transformers/examples/flax/question-answering/run_qa.py/0
{ "file_path": "transformers/examples/flax/question-answering/run_qa.py", "repo_id": "transformers", "token_count": 20250 }
486
#### Fine-tuning BERT on SQuAD1.0 with relative position embeddings

The following examples show how to fine-tune BERT models with different relative position embeddings. The BERT model
`google-bert/bert-base-uncased` was pretrained with default absolute position embeddings. We provide the following pretrained
models, which were pre-trained on the same training data (BooksCorpus and English Wikipedia) as the original BERT model
but with different relative position embeddings.

* `zhiheng-huang/bert-base-uncased-embedding-relative-key`, trained from scratch with the relative embedding proposed by
Shaw et al., [Self-Attention with Relative Position Representations](https://arxiv.org/abs/1803.02155)
* `zhiheng-huang/bert-base-uncased-embedding-relative-key-query`, trained from scratch with relative embedding method 4
in Huang et al. [Improve Transformer Models with Better Relative Position Embeddings](https://arxiv.org/abs/2009.13658)
* `zhiheng-huang/bert-large-uncased-whole-word-masking-embedding-relative-key-query`, fine-tuned from model
`google-bert/bert-large-uncased-whole-word-masking` for 3 additional epochs with relative embedding method 4 in Huang et al.
[Improve Transformer Models with Better Relative Position Embeddings](https://arxiv.org/abs/2009.13658)

##### Base models fine-tuning

```bash
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
torchrun --nproc_per_node=8 ./examples/question-answering/run_squad.py \
    --model_name_or_path zhiheng-huang/bert-base-uncased-embedding-relative-key-query \
    --dataset_name squad \
    --do_train \
    --do_eval \
    --learning_rate 3e-5 \
    --num_train_epochs 2 \
    --max_seq_length 512 \
    --doc_stride 128 \
    --output_dir relative_squad \
    --per_device_eval_batch_size=60 \
    --per_device_train_batch_size=6
```

Training with the above command leads to the following results, boosting the f1 score from the BERT default of 88.52 to 90.54.

```bash
'exact': 83.6802270577105, 'f1': 90.54772098174814
```

Changing `max_seq_length` from 512 to 384 in the above command leads to an f1 score of 90.34. Replacing the above
model `zhiheng-huang/bert-base-uncased-embedding-relative-key-query` with
`zhiheng-huang/bert-base-uncased-embedding-relative-key` leads to an f1 score of 89.51. Training on a single GPU
instead of 8 leads to an f1 score of 90.71.

##### Large models fine-tuning

```bash
export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
torchrun --nproc_per_node=8 ./examples/question-answering/run_squad.py \
    --model_name_or_path zhiheng-huang/bert-large-uncased-whole-word-masking-embedding-relative-key-query \
    --dataset_name squad \
    --do_train \
    --do_eval \
    --learning_rate 3e-5 \
    --num_train_epochs 2 \
    --max_seq_length 512 \
    --doc_stride 128 \
    --output_dir relative_squad \
    --per_gpu_eval_batch_size=6 \
    --per_gpu_train_batch_size=2 \
    --gradient_accumulation_steps 3
```

Training with the above command leads to an f1 score of 93.52, slightly better than the 93.15 obtained with
`google-bert/bert-large-uncased-whole-word-masking`.
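Once either command above has written a checkpoint to `relative_squad`, the result can be sanity-checked with the standard question-answering pipeline. A minimal sketch (it assumes the local `relative_squad` output directory produced by the commands above; any QA checkpoint path or Hub id works the same way, and the question/context strings are made up):

```python
from transformers import pipeline

# Load the checkpoint written by run_squad.py (--output_dir relative_squad).
qa = pipeline("question-answering", model="relative_squad", tokenizer="relative_squad")

result = qa(
    question="What data were the relative-embedding models pre-trained on?",
    context="The relative position embedding models were pre-trained on BooksCorpus and English Wikipedia.",
)
print(result["answer"], round(result["score"], 3))
```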
#### Distributed training

Here is an example using distributed training on 8 V100 GPUs and the BERT whole-word-masking uncased model to reach an F1 > 93 on SQuAD1.1:

```bash
torchrun --nproc_per_node=8 ./examples/question-answering/run_squad.py \
    --model_name_or_path google-bert/bert-large-uncased-whole-word-masking \
    --dataset_name squad \
    --do_train \
    --do_eval \
    --learning_rate 3e-5 \
    --num_train_epochs 2 \
    --max_seq_length 384 \
    --doc_stride 128 \
    --output_dir ./examples/models/wwm_uncased_finetuned_squad/ \
    --per_device_eval_batch_size=3 \
    --per_device_train_batch_size=3
```

Training with the previously defined hyper-parameters yields the following results:

```bash
f1 = 93.15
exact_match = 86.91
```

This fine-tuned model is available as a checkpoint under the reference
[`google-bert/bert-large-uncased-whole-word-masking-finetuned-squad`](https://huggingface.co/google-bert/bert-large-uncased-whole-word-masking-finetuned-squad).

## Results

A larger batch size may improve performance at the cost of more memory.

##### Results for SQuAD1.0 with the previously defined hyper-parameters:

```python
{
    "exact": 85.45884578997162,
    "f1": 92.5974600601065,
    "total": 10570,
    "HasAns_exact": 85.45884578997162,
    "HasAns_f1": 92.59746006010651,
    "HasAns_total": 10570
}
```

##### Results for SQuAD2.0 with the previously defined hyper-parameters:

```python
{
    "exact": 80.4177545691906,
    "f1": 84.07154997729623,
    "total": 11873,
    "HasAns_exact": 76.73751686909581,
    "HasAns_f1": 84.05558584352873,
    "HasAns_total": 5928,
    "NoAns_exact": 84.0874684608915,
    "NoAns_f1": 84.0874684608915,
    "NoAns_total": 5945
}
```
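The exact-match/F1 numbers above come from the official SQuAD evaluation; the same metrics can be recomputed offline with the `evaluate` library (the same API the Flax `run_qa.py` script above uses). A small sketch with a single hand-written prediction — the example id and answer span are illustrative only:

```python
import evaluate

squad_metric = evaluate.load("squad")  # use "squad_v2" for datasets with unanswerable questions

predictions = [{"id": "toy-0", "prediction_text": "Denver Broncos"}]
references = [{"id": "toy-0", "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}]

print(squad_metric.compute(predictions=predictions, references=references))
# {'exact_match': 100.0, 'f1': 100.0}
```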
transformers/examples/legacy/question-answering/README.md/0
{ "file_path": "transformers/examples/legacy/question-answering/README.md", "repo_id": "transformers", "token_count": 1768 }
487
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import fire def minify(src_dir: str, dest_dir: str, n: int): """Write first n lines of each file f in src_dir to dest_dir/f""" src_dir = Path(src_dir) dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) for path in src_dir.iterdir(): new = [x.rstrip() for x in list(path.open().readlines())][:n] dest_path = dest_dir.joinpath(path.name) print(dest_path) dest_path.open("w").write("\n".join(new)) if __name__ == "__main__": fire.Fire(minify)
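Because the module exposes `minify` through `fire`, it is normally invoked from the command line, but it can also be called directly. A quick sketch on a throwaway directory (the `toy_src`/`toy_dest` paths and file names are made up for illustration, and the import assumes the file is importable as `minify_dataset.py`):

```python
from pathlib import Path

from minify_dataset import minify  # hypothetical import of the script above

# Build a tiny source directory with two line-oriented "dataset" files.
src = Path("toy_src")
src.mkdir(exist_ok=True)
(src / "train.source").write_text("\n".join(f"source line {i}" for i in range(1000)))
(src / "train.target").write_text("\n".join(f"target line {i}" for i in range(1000)))

# Keep only the first 100 lines of every file, writing the results to toy_dest/.
minify(str(src), "toy_dest", 100)
print(len((Path("toy_dest") / "train.source").read_text().splitlines()))  # 100
```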
transformers/examples/legacy/seq2seq/minify_dataset.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/minify_dataset.py", "repo_id": "transformers", "token_count": 398 }
488
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char; re.sub returns a new string, so keep the result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
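ROUGE-Lsum treats each line as a sentence, which is why generated summaries are passed through this helper before scoring. A minimal usage sketch (assumes the module is importable as `sentence_splitter` and that nltk with the punkt tokenizer data is installed; the summary text is made up):

```python
from sentence_splitter import add_newline_to_end_of_each_sentence

summary = "The model was fine-tuned on XSum. It sets a new state of the art."
print(add_newline_to_end_of_each_sentence(summary))
# The model was fine-tuned on XSum.
# It sets a new state of the art.
```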
transformers/examples/legacy/seq2seq/sentence_splitter.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/sentence_splitter.py", "repo_id": "transformers", "token_count": 403 }
489