Dataset schema (from the dataset viewer header):
  text                string, length 7 to 324k characters
  id                  string, length 14 to 166 characters
  metadata            dict
  __index_level_0__   int64, range 0 to 463
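The rows below follow this schema. As a minimal sketch only (the dataset repository id is not given in this dump, so "your-org/tgi-code-dump" is a hypothetical placeholder), iterating rows with the Hugging Face `datasets` library could look like this:

    # Minimal sketch, assuming the dump is published as a Hugging Face dataset.
    # "your-org/tgi-code-dump" is a placeholder id, not the real repository name.
    from datasets import load_dataset

    ds = load_dataset("your-org/tgi-code-dump", split="train")

    for row in ds.select(range(3)):
        # Each row holds the raw file text, a path-derived id, and file metadata.
        meta = row["metadata"]
        print(row["id"], meta["file_path"], meta["token_count"])
        print(row["text"][:80])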
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 563, "logprob": null, "text": "def" }, { "id": 942, "logprob": -5.1367188, "text": " print" }, { "id": 62, "logprob": -0.24450684, "text": "_" }, { "id": 7196, "logprob": -6.9609375, "text": "hello" } ], "seed": null, "tokens": [ { "id": 1241, "logprob": -0.9863281, "special": false, "text": "():" }, { "id": 258, "logprob": -0.21447754, "special": false, "text": "\n " }, { "id": 942, "logprob": -0.43701172, "special": false, "text": " print" }, { "id": 372, "logprob": -0.5361328, "special": false, "text": "(\"" }, { "id": 7371, "logprob": -0.44555664, "special": false, "text": "Hello" }, { "id": 9956, "logprob": -1.2412109, "special": false, "text": " World" }, { "id": 8657, "logprob": -0.7583008, "special": false, "text": "!\")" }, { "id": 185, "logprob": -0.76171875, "special": false, "text": "\n" }, { "id": 185, "logprob": -0.20837402, "special": false, "text": "\n" }, { "id": 1018, "logprob": -1.2470703, "special": false, "text": "print" } ] }, "generated_text": "():\n print(\"Hello World!\")\n\nprint" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_santacoder/test_flash_santacoder.json", "repo_id": "text-generation-inference", "token_count": 1111 }
200
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.83984375, "text": " is" }, { "id": 18147, "logprob": -12.8125, "text": " Deep" }, { "id": 20727, "logprob": -2.84375, "text": " Learning" }, { "id": 32, "logprob": -1.25, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.37890625, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.4296875, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.078125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.515625, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.6015625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.65625, "special": false, "text": " a" }, { "id": 747, "logprob": -2.109375, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.328125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0032653809, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.28125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" 
} ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1276, "logprob": null, "text": "What" }, { "id": 310, "logprob": -0.80078125, "text": " is" }, { "id": 18147, "logprob": -13.25, "text": " Deep" }, { "id": 20727, "logprob": -2.828125, "text": " Learning" }, { "id": 32, "logprob": -1.1953125, "text": "?" } ], "seed": null, "tokens": [ { "id": 187, "logprob": -0.296875, "special": false, "text": "\n" }, { "id": 187, "logprob": -0.3359375, "special": false, "text": "\n" }, { "id": 30763, "logprob": -1.2578125, "special": false, "text": "Deep" }, { "id": 4715, "logprob": -0.5546875, "special": false, "text": " learning" }, { "id": 310, "logprob": -0.62890625, "special": false, "text": " is" }, { "id": 247, "logprob": -0.64453125, "special": false, "text": " a" }, { "id": 747, "logprob": -2.078125, "special": false, "text": " new" }, { "id": 1511, "logprob": -2.28125, "special": false, "text": " type" }, { "id": 273, "logprob": -0.0030670166, "special": false, "text": " of" }, { "id": 5145, "logprob": -1.3125, "special": false, "text": " machine" } ], "top_tokens": null }, "generated_text": "\n\nDeep learning is a new type of machine" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mamba/test_mamba_load.json", "repo_id": "text-generation-inference", "token_count": 5458 }
201
{ "choices": [ { "delta": { "content": null, "role": "assistant", "tool_calls": { "function": { "arguments": "</s>", "name": null }, "id": "", "index": 20, "type": "function" } }, "finish_reason": "eos_token", "index": 20, "logprobs": null } ], "created": 1709087088, "id": "", "model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "object": "text_completion", "system_fingerprint": "1.4.3-native" }
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_stream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_stream.json", "repo_id": "text-generation-inference", "token_count": 319 }
202
import pytest


@pytest.fixture(scope="module")
def flash_santacoder_handle(launcher):
    with launcher("bigcode/santacoder") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_santacoder(flash_santacoder_handle):
    await flash_santacoder_handle.health(300)
    return flash_santacoder_handle.client


@pytest.mark.asyncio
async def test_flash_santacoder(flash_santacoder, response_snapshot):
    response = await flash_santacoder.generate(
        "def print_hello", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_santacoder_load(
    flash_santacoder, generate_load, response_snapshot
):
    responses = await generate_load(
        flash_santacoder, "def print_hello", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])

    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_santacoder.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_santacoder.py", "repo_id": "text-generation-inference", "token_count": 387 }
203
//! Text Generation gRPC client library

mod client;
#[allow(clippy::derive_partial_eq_without_eq)]
mod pb;
mod sharded_client;

pub use client::Client;
pub use pb::generate::v2::HealthResponse;
pub use pb::generate::v2::InfoResponse as ShardInfo;
pub use pb::generate::v2::{
    Batch, CachedBatch, FinishReason, GeneratedText, Generation, GrammarType,
    NextTokenChooserParameters, Request, StoppingCriteriaParameters, Tokens,
};
pub use sharded_client::ShardedClient;
use thiserror::Error;
use tonic::transport;
use tonic::Status;

#[derive(Error, Debug, Clone)]
pub enum ClientError {
    #[error("Could not connect to Text Generation server: {0}")]
    Connection(String),
    #[error("Server error: {0}")]
    Generation(String),
    #[error("Sharded results are empty")]
    EmptyResults,
}

impl From<Status> for ClientError {
    fn from(err: Status) -> Self {
        let err = Self::Generation(err.message().to_string());
        tracing::error!("{err}");
        err
    }
}

impl From<transport::Error> for ClientError {
    fn from(err: transport::Error) -> Self {
        let err = Self::Connection(err.to_string());
        tracing::error!("{err}");
        err
    }
}

pub type Result<T> = std::result::Result<T, ClientError>;
text-generation-inference/router/client/src/lib.rs/0
{ "file_path": "text-generation-inference/router/client/src/lib.rs", "repo_id": "text-generation-inference", "token_count": 464 }
204
# Fork that adds only the correct stream to this kernel in order
# to make cuda graphs work.
awq_commit := bd1dc2d5254345cc76ab71894651fb821275bdd4

awq:
	rm -rf llm-awq
	git clone https://github.com/huggingface/llm-awq

build-awq: awq
	cd llm-awq/ && git fetch && git checkout $(awq_commit)
	cd llm-awq/awq/kernels && python setup.py build

install-awq: build-awq
	pip uninstall awq_inference_engine -y || true
	cd llm-awq/awq/kernels && python setup.py install
text-generation-inference/server/Makefile-awq/0
{ "file_path": "text-generation-inference/server/Makefile-awq", "repo_id": "text-generation-inference", "token_count": 183 }
205
// Adapted from turboderp exllama: https://github.com/turboderp/exllama

#ifndef _q4_matmul_cuh
#define _q4_matmul_cuh

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include <ATen/cuda/CUDAContext.h>

#include "q4_matrix.cuh"
#include "../tuning.h"

void q4_matmul_cuda
(
    ExLlamaTuning* tuningParams,
    const half* x,
    const int x_height,
    const Q4Matrix* w,
    half* out,
    bool no_zero,
    cudaStream_t alt_stream
);

void q4_matmul_recons_cuda
(
    ExLlamaTuning* tuningParams,
    const half* x,
    const int x_height,
    Q4Matrix* w,
    half* out,
    bool no_zero,
    const cublasHandle_t handle
);

#endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/q4_matmul.cuh", "repo_id": "text-generation-inference", "token_count": 322 }
206
#include "compat.cuh" __forceinline__ __device__ half2 dot22_8(half2(&dq)[4], const half* a_ptr, const half2 g_result) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return __hadd2(result, g_result); } __forceinline__ __device__ float dot22_8_f(half2(&dq)[4], const half* a_ptr) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return __half2float(__low2half(result)) + __half2float(__high2half(result)); } __forceinline__ __device__ half2 dot22_8_h2(half2(&dq)[4], const half* a_ptr) { half2 result = {}; const half2* a2_ptr = (const half2*)a_ptr; #pragma unroll for (int i = 0; i < 4; i++) result = __hfma2(dq[i], *a2_ptr++, result); return result; } typedef void (*fp_gemm_half_q_half_gptq_kernel) ( const half*, const uint32_t*, const uint32_t*, const half*, half*, const int, const int, const int, const int, const int, const uint16_t*, const int, const bool, const half*, const int ); template <int m_count, bool use_r_weights, bool mul_r_weights> __global__ void gemm_half_q_half_gptq_kernel ( const half* __restrict__ a, const uint32_t* __restrict__ b_q_weight, const uint32_t* __restrict__ b_gptq_qzeros, const half* __restrict__ b_gptq_scales, half* __restrict__ c, const int size_m, const int size_n, const int size_k, const int groups, const int groupsize, const uint16_t* __restrict__ b_q_perm, const int rows_4, const bool clear, const half* r_weights, const int r_weights_stride ) { MatrixView_half a_(a, size_m, size_k); MatrixView_half_rw c_(c, size_m, size_n); MatrixView_q4_row b_gptq_qzeros_(b_gptq_qzeros, groups, size_n); MatrixView_half b_gptq_scales_(b_gptq_scales, groups, size_n); int t = threadIdx.x; // Block int offset_n = blockIdx.x * GPTQ_BLOCK_KN_SIZE * 4; int offset_m = blockIdx.y * m_count; int offset_k = blockIdx.z * GPTQ_BLOCK_KN_SIZE; int end_n = min(offset_n + GPTQ_BLOCK_KN_SIZE * 4, size_n); int end_m = min(offset_m + m_count, size_m); int end_k = min(offset_k + GPTQ_BLOCK_KN_SIZE, size_k); int n = offset_n + t * 4; // Read weights half_uint16 weights[MAX_Q_GEMM_WEIGHTS]; if constexpr (use_r_weights) { uint16_t any_w = 0; const half* w_ptr = r_weights; for (int m = 0; m < m_count; ++m) { weights[m].as_half = *w_ptr; w_ptr += r_weights_stride; any_w |= weights[m].as_uint16; } if (!any_w) return; // Early exit if all weights are zero -- does not zero output (!!!) 
} // Preload block_a __shared__ half block_a[m_count][GPTQ_BLOCK_KN_SIZE]; if (offset_k + t < end_k) { for (int m = 0; m < m_count; ++m) { const half* a_ptr = a_.item_ptr(offset_m + m, 0); half* block_a_ptr = block_a[m]; half a0; if (b_q_perm) a0 = a_ptr[b_q_perm[offset_k + t]]; else a0 = a_ptr[offset_k + t]; block_a_ptr[t] = a0; } } // Zero output if (n >= size_n) return; if (clear && blockIdx.z == 0) // && (threadIdx.x & 1) == 0) { for (int m = 0; m < m_count; m++) *((uint64_t*)c_.item_ptr(offset_m + m, n)) = 0; } __syncthreads(); // Find initial group int group = offset_k / groupsize; int nextgroup = offset_k + groupsize; // a, b offset int qk = offset_k / (32 / 4); const uint32_t* b_ptr = b_q_weight + qk * size_n + n; const half* a_ptr = &block_a[0][0]; int a_stride = GPTQ_BLOCK_KN_SIZE; // Initial group int zeros[4]; half2 scales[4]; half2 z1z16[4][2]; half2 y1y16[4][2]; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); // __syncthreads(); // Column result half2 block_c[m_count][4] = {}; // Dequantize and multiply int k = offset_k; while (k < end_k) { if (k == nextgroup) { group++; nextgroup += groupsize; b_gptq_qzeros_.item4(zeros, group, n); b_gptq_scales_.item4_h2(scales, group, n); dequant_4bit_8_prep_zero((zeros[0] + 1) & 0x0F, z1z16[0], y1y16[0]); dequant_4bit_8_prep_zero((zeros[1] + 1) & 0x0F, z1z16[1], y1y16[1]); dequant_4bit_8_prep_zero((zeros[2] + 1) & 0x0F, z1z16[2], y1y16[2]); dequant_4bit_8_prep_zero((zeros[3] + 1) & 0x0F, z1z16[3], y1y16[3]); } #pragma unroll for (int j = 0; j < 4; j++) { const int4* b_ptr4 = (int4*) b_ptr; int4 load_int4 = *b_ptr4; half2 dq[4][4]; dequant_4bit_8_gptq(load_int4.x, dq[0], z1z16[0], y1y16[0], size_n, false); dequant_4bit_8_gptq(load_int4.y, dq[1], z1z16[1], y1y16[1], size_n, false); dequant_4bit_8_gptq(load_int4.z, dq[2], z1z16[2], y1y16[2], size_n, false); dequant_4bit_8_gptq(load_int4.w, dq[3], z1z16[3], y1y16[3], size_n, false); #pragma unroll for (int m = 0; m < m_count; m++) { if constexpr (use_r_weights) { if (!weights[m].as_uint16) continue; } block_c[m][0] = __hfma2(dot22_8_h2(dq[0], a_ptr + m * a_stride), scales[0], block_c[m][0]); block_c[m][1] = __hfma2(dot22_8_h2(dq[1], a_ptr + m * a_stride), scales[1], block_c[m][1]); block_c[m][2] = __hfma2(dot22_8_h2(dq[2], a_ptr + m * a_stride), scales[2], block_c[m][2]); block_c[m][3] = __hfma2(dot22_8_h2(dq[3], a_ptr + m * a_stride), scales[3], block_c[m][3]); } b_ptr += size_n; a_ptr += 8; } k += 32; } for (int m = 0; m < m_count; m++) { half2 *out = (half2*) c_.item_ptr(offset_m + m, n); half result0 = __hadd(__low2half(block_c[m][0]), __high2half(block_c[m][0])); half result1 = __hadd(__low2half(block_c[m][1]), __high2half(block_c[m][1])); half result2 = __hadd(__low2half(block_c[m][2]), __high2half(block_c[m][2])); half result3 = __hadd(__low2half(block_c[m][3]), __high2half(block_c[m][3])); half2 result01 = __halves2half2(result0, result1); half2 result23 = __halves2half2(result2, result3); if constexpr (mul_r_weights) { half2 w_mul2 = __half2half2(weights[m].as_half); result01 = __hmul2(result01, w_mul2); result23 = __hmul2(result23, w_mul2); } atomicAdd(out , result01); atomicAdd(out + 1, result23); } } template <bool use_r_weights, bool mul_r_weights> struct map_m_count_gptq { 
static constexpr fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(int m_count) { #if GPTQ_BLOCK_M_SIZE_MAX >= 1 if (m_count == 1) return gemm_half_q_half_gptq_kernel<1, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 2 if (m_count == 2) return gemm_half_q_half_gptq_kernel<2, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 3 if (m_count == 3) return gemm_half_q_half_gptq_kernel<3, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 4 if (m_count == 4) return gemm_half_q_half_gptq_kernel<4, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 5 if (m_count == 5) return gemm_half_q_half_gptq_kernel<5, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 6 if (m_count == 6) return gemm_half_q_half_gptq_kernel<6, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 7 if (m_count == 7) return gemm_half_q_half_gptq_kernel<7, use_r_weights, mul_r_weights>; #endif #if GPTQ_BLOCK_M_SIZE_MAX >= 8 if (m_count == 8) return gemm_half_q_half_gptq_kernel<8, use_r_weights, mul_r_weights>; #endif return NULL; } }; fp_gemm_half_q_half_gptq_kernel pick_gemm_half_q_half_gptq_kernel(const int m_count, bool r_weights, bool mul_r_weights) { if (!r_weights && !mul_r_weights) return map_m_count_gptq<false, false>::pick_gemm_half_q_half_gptq_kernel(m_count); if (!r_weights && mul_r_weights) return map_m_count_gptq<false, true>::pick_gemm_half_q_half_gptq_kernel(m_count); if ( r_weights && !mul_r_weights) return map_m_count_gptq< true, false>::pick_gemm_half_q_half_gptq_kernel(m_count); if ( r_weights && mul_r_weights) return map_m_count_gptq< true, true>::pick_gemm_half_q_half_gptq_kernel(m_count); return NULL; }
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm_kernel_gptq.cuh", "repo_id": "text-generation-inference", "token_count": 4839 }
207
import torch
import grpc

from google.rpc import status_pb2, code_pb2
from grpc_status import rpc_status
from grpc_interceptor.server import AsyncServerInterceptor
from loguru import logger
from typing import Callable, Any


class ExceptionInterceptor(AsyncServerInterceptor):
    async def intercept(
        self,
        method: Callable,
        request_or_iterator: Any,
        context: grpc.ServicerContext,
        method_name: str,
    ) -> Any:
        try:
            response = method(request_or_iterator, context)
            return await response
        except Exception as err:
            method_name = method_name.split("/")[-1]
            logger.exception(f"Method {method_name} encountered an error.")

            if torch.cuda.is_available():
                torch.cuda.empty_cache()

            await context.abort_with_status(
                rpc_status.to_status(
                    status_pb2.Status(code=code_pb2.INTERNAL, message=str(err))
                )
            )
text-generation-inference/server/text_generation_server/interceptor.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/interceptor.py", "repo_id": "text-generation-inference", "token_count": 449 }
208
# coding=utf-8 # Copyright 2024 Starcoder2 AI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, FastRMSNorm, FastLayerNorm, ) class Starcoder2Config(PretrainedConfig): model_type = "starcoder2" def __init__( self, vocab_size=49152, hidden_size=3072, intermediate_size=12288, num_hidden_layers=30, num_attention_heads=24, num_key_value_heads=2, mlp_type="default", hidden_act="gelu_pytorch_tanh", max_position_embeddings=4096, initializer_range=0.018042, norm_type="layer_norm", norm_epsilon=1e-5, use_cache=True, bos_token_id=50256, eos_token_id=50256, rope_theta=10000.0, sliding_window=None, attention_dropout=0.0, residual_dropout=0.0, embedding_dropout=0.0, use_bias: bool = True, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window self.use_bias = use_bias # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.mlp_type = mlp_type self.hidden_act = hidden_act self.initializer_range = initializer_range self.norm_type = norm_type self.norm_epsilon = norm_epsilon self.use_cache = use_cache self.rope_theta = rope_theta self.attention_dropout = attention_dropout self.residual_dropout = residual_dropout self.embedding_dropout = embedding_dropout super().__init__( bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs, ) def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=config.use_bias, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in 
["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" if config.use_bias: w = [ weights.get_sharded(f"{p}.bias", dim=0) for p in [f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"] ] bias = torch.cat(w, dim=0).to(dtype=weights.dtype).to(device=weights.device) else: bias = None return TensorParallelColumnLinear( get_linear(weight, bias=bias, quantize=config.quantize) ) class Starcoder2Attention(torch.nn.Module): def __init__( self, prefix: str, config, weights, ): super().__init__() self.max_past = ( config.sliding_window if config.sliding_window is not None else -1 ) self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.head_size, base=config.rope_theta, device=weights.device, ) self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o_proj", weights=weights, bias=config.use_bias, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): qkv = self.query_key_value(hidden_states) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv paged_attention.reshape_and_cache( kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots ) # output tensor attn_output = torch.empty_like(query) # Prefill if cu_seqlen_prefill is not None: # flash attention flash_attn.attention( query, torch.select(kv, dim=1, index=0), torch.select(kv, dim=1, index=1), attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, window_size_left=self.max_past, ) # Decode else: paged_attention.attention( attn_output, query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) class Starcoder2MLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, 
approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) # Fuse gate and up proj self.c_fc = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.c_fc", weights=weights, bias=config.use_bias, ) self.c_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.c_proj", weights=weights, bias=config.use_bias, ) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) return self.c_proj(hidden_states) class Starcoder2GatedMLP(nn.Module): def __init__(self, prefix, config, weights): super().__init__() act = config.hidden_act self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) # Fuse gate and up proj self.gate_up_proj = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"], weights=weights, dim=0, bias=config.use_bias, ) self.down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.down_proj", weights=weights, bias=config.use_bias, ) self.intermediate_size = ( config.intermediate_size // weights.process_group.size() ) def forward(self, hidden_states): gate_up_states = self.gate_up_proj(hidden_states) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj(self.act(gate_up_states[:, 0]) * gate_up_states[:, 1]) STARCODER2_NORMALIZATION_CLASSES = { "layer_norm": FastLayerNorm, "rms_norm": FastRMSNorm, } STARCODER2_MLP_CLASSES = { "default": Starcoder2MLP, "gated": Starcoder2GatedMLP, } class Starcoder2Layer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() prefix = f"model.layers.{layer_id}" self.self_attn = Starcoder2Attention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) self.mlp = STARCODER2_MLP_CLASSES[config.mlp_type]( prefix=f"{prefix}.mlp", config=config, weights=weights ) self.input_layernorm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.norm_epsilon ) self.post_attention_layernorm = STARCODER2_NORMALIZATION_CLASSES[ config.norm_type ].load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.norm_epsilon, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) # faster post attention rms norm normed_attn_res_output, attn_res = self.post_attention_layernorm( attn_output, res ) mlp_output = self.mlp(normed_attn_res_output) return mlp_output, attn_res class Starcoder2Model(torch.nn.Module): def __init__(self, config, weights): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.embed_tokens = TensorParallelEmbedding( prefix="model.embed_tokens", weights=weights ) self.layers = nn.ModuleList( [ Starcoder2Layer( layer_id, config, weights, ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = STARCODER2_NORMALIZATION_CLASSES[config.norm_type].load( prefix="model.norm", weights=weights, eps=config.norm_epsilon ) self.gradient_checkpointing = False self.head_size = self.layers[0].self_attn.head_size 
self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( position_ids, true_max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashStarcoder2ForCausalLM(torch.nn.Module): def __init__(self, config, weights): super().__init__() self.model = Starcoder2Model(config, weights) try: self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights, ) except RuntimeError: self.lm_head = SpeculativeHead.load( config, prefix="model.embed_tokens", weights=weights, ) self.max_past = config.sliding_window self.max_past_tensor = ( torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values input_lengths = torch.clamp(input_lengths, max=self.max_past_tensor) hidden_states = self.model( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, true_max_s, prefill_cache_indices, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_starcoder2_modeling.py", "repo_id": "text-generation-inference", "token_count": 8784 }
209
import math import torch import torch.distributed import numpy as np from dataclasses import dataclass from opentelemetry import trace from transformers import PreTrainedTokenizerBase from transformers.models.llama import LlamaTokenizerFast from typing import Optional, Tuple, Type from text_generation_server.pb import generate_pb2 from text_generation_server.models import FlashCausalLM from text_generation_server.models.flash_causal_lm import FlashCausalLMBatch, BLOCK_SIZE from text_generation_server.models.cache_manager import ( get_cache_manager, ) from text_generation_server.models.custom_modeling.flash_mistral_modeling import ( FlashMistralForCausalLM, MistralConfig, ) from text_generation_server.utils.speculate import get_speculate from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, HeterogeneousNextTokenChooser, StoppingCriteria, ) tracer = trace.get_tracer(__name__) # Will be set in init SLIDING_WINDOW: Optional[int] = None SLIDING_WINDOW_BLOCKS: Optional[int] = None MEM_POOL = torch.cuda.graph_pool_handle() def set_sliding_window(sliding_window: int, sliding_window_blocks: int): global SLIDING_WINDOW global SLIDING_WINDOW_BLOCKS SLIDING_WINDOW = sliding_window SLIDING_WINDOW_BLOCKS = sliding_window_blocks def get_sliding_windows() -> Tuple[int, int]: global SLIDING_WINDOW global SLIDING_WINDOW_BLOCKS return SLIDING_WINDOW, SLIDING_WINDOW_BLOCKS # Adds windowing logic to FlashCausalLMBatch @dataclass class FlashMistralBatch(FlashCausalLMBatch): # Prefill cache indices is used to slice into the kv tensor before caching it into the paged attention buffers # as we only keep SLIDING_WINDOW values instead of the whole tensor prefill_cache_indices: Optional[torch.Tensor] = None @classmethod def from_pb( cls, pb: generate_pb2.Batch, tokenizer: PreTrainedTokenizerBase, dtype: torch.dtype, device: torch.device, ) -> "FlashCausalLMBatch": sliding_window, sliding_window_blocks = get_sliding_windows() batch_inputs = [] max_truncation = 0 for r in pb.requests: batch_inputs.append(r.inputs) max_truncation = max(max_truncation, r.truncate) batch_tokenized_inputs = tokenizer( batch_inputs, truncation=True, max_length=max_truncation )["input_ids"] position_ids = [] cu_seqlen_prefill = [0] needed_blocks_slots = [] start_slots = [] slot_indices = [] prefill_cache_indices = [] input_lengths = [] prefix_offsets = [] read_offsets = [] all_input_ids = [] requests_idx_mapping = {} all_prefill_logprobs = True no_prefill_logprobs = True prefill_head_indices = [] prefill_next_token_indices = [] prefill_cu_outlens = [0] next_token_chooser_parameters = [] stopping_criterias = [] top_n_tokens = [] # Cumulative length cumulative_length = 0 cumulative_max_length = 0 prefill_out_cumulative_length = 0 blocks = 0 max_seqlen = 0 max_length = 0 max_blocks = 0 # Parse batch for i, (r, tokenized_input) in enumerate( zip(pb.requests, batch_tokenized_inputs) ): # request id -> idx in list mapping requests_idx_mapping[r.id] = i tokenized_input = tokenized_input[-r.truncate :] input_length = len(tokenized_input) input_lengths.append(input_length) prefix_offsets.append(input_length - 5) read_offsets.append(input_length) all_input_ids.append(tokenized_input) # Position ids request_position_ids = torch.arange(0, input_length, dtype=torch.int32) position_ids.append(request_position_ids) # Add cumulative lengths of all previous inputs cu_seqlen_prefill.append(cumulative_length + input_length) next_token_chooser_parameters.append(r.parameters) stopping_criteria = 
StoppingCriteria.from_pb( r.stopping_parameters, tokenizer ) max_new_tokens = stopping_criteria.max_new_tokens stopping_criterias.append(stopping_criteria) top_n_tokens.append(r.top_n_tokens) # Paged attention # Remove one as the first token des not have a past speculative_length = get_speculate() total_tokens = input_length + max_new_tokens - 1 + speculative_length # Needed blocks can not go over SLIDING_WINDOW_BLOCKS needed_blocks = math.ceil(total_tokens / BLOCK_SIZE) if sliding_window_blocks is not None: needed_blocks = min(needed_blocks, sliding_window_blocks) blocks += needed_blocks needed_blocks_slots.append((needed_blocks, total_tokens)) start_slots.append(cumulative_max_length) request_slot_indices = torch.arange( cumulative_max_length, cumulative_max_length + input_length, dtype=torch.int64, ) slot_indices.append(request_slot_indices) # Create tensor to slice into the kv tensor in prefill if sliding_window is not None: request_prefill_cache_indices = torch.arange( cumulative_length + max(0, input_length - sliding_window), cumulative_length + input_length, dtype=torch.int64, ) prefill_cache_indices.append(request_prefill_cache_indices) all_prefill_logprobs = all_prefill_logprobs and r.prefill_logprobs no_prefill_logprobs = no_prefill_logprobs and not r.prefill_logprobs if r.prefill_logprobs: prefill_head_indices.append(request_position_ids + cumulative_length) prefill_next_token_indices.append( prefill_out_cumulative_length + input_length - 1 ) prefill_cu_outlens.append(prefill_out_cumulative_length + input_length) prefill_out_cumulative_length += input_length else: prefill_head_indices.append( torch.tensor( [cumulative_length + input_length - 1], dtype=torch.int32 ) ) prefill_next_token_indices.append(prefill_out_cumulative_length) prefill_cu_outlens.append(prefill_out_cumulative_length + 1) prefill_out_cumulative_length += 1 # Update cumulative_length += input_length cumulative_max_length += total_tokens max_seqlen = max(max_seqlen, input_length) max_blocks = max(max_blocks, needed_blocks) max_length = max( max_length, input_length + max_new_tokens + speculative_length ) next_token_chooser = HeterogeneousNextTokenChooser.from_pb( next_token_chooser_parameters, dtype, device, tokenizer ) start_slots = torch.tensor(start_slots, dtype=torch.int64) # Padded all_input_ids_tensor all_input_ids_tensor = np.zeros( (len(all_input_ids), max_length), dtype=np.int64 ) for i, input_ids in enumerate(all_input_ids): all_input_ids_tensor[i, : len(input_ids)] = input_ids # Create tensors on device all_input_ids_tensor = torch.tensor( all_input_ids_tensor, dtype=torch.int64, device=device ) if len(pb.requests) > 1: input_ids = np.concatenate(all_input_ids, dtype=np.int64) position_ids = torch.cat(position_ids) slot_indices = torch.cat(slot_indices) if sliding_window is not None: prefill_cache_indices = torch.cat(prefill_cache_indices) else: input_ids = all_input_ids[0] position_ids = position_ids[0] slot_indices = slot_indices[0] if sliding_window is not None: prefill_cache_indices = prefill_cache_indices[0] cu_seqlen_prefill = torch.tensor( cu_seqlen_prefill, device=device, dtype=torch.int32 ) position_ids = position_ids.to(device) slot_indices = slot_indices.to(device) prefill_cache_indices = ( prefill_cache_indices.to(device) if sliding_window is not None else None ) input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device) input_lengths_tensor = torch.tensor( input_lengths, dtype=torch.int32, device=device ) if all_prefill_logprobs: prefill_head_indices = None 
prefill_next_token_indices = cu_seqlen_prefill[1:] - 1 elif no_prefill_logprobs: prefill_head_indices = cu_seqlen_prefill[1:] - 1 prefill_next_token_indices = None else: prefill_head_indices = torch.tensor( torch.cat(prefill_head_indices), dtype=torch.int64, device=device ) prefill_next_token_indices = torch.tensor( prefill_next_token_indices, dtype=torch.int64, device=device ) top_n_tokens_tensor = torch.tensor( top_n_tokens, device=device, dtype=torch.int64 ) return cls( batch_id=pb.id, requests=pb.requests, requests_idx_mapping=requests_idx_mapping, input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, start_slots=start_slots, slot_indices=slot_indices, needed_blocks_slots=needed_blocks_slots, block_tables=None, block_tables_tensor=None, slots=None, max_seqlen=max_seqlen, prefill_head_indices=prefill_head_indices, prefill_next_token_indices=prefill_next_token_indices, prefill_cu_outlens=prefill_cu_outlens, input_lengths=input_lengths, input_lengths_tensor=input_lengths_tensor, prefix_offsets=prefix_offsets, read_offsets=read_offsets, all_input_ids=all_input_ids, all_input_ids_tensor=all_input_ids_tensor, next_token_chooser=next_token_chooser, stopping_criterias=stopping_criterias, top_n_tokens=top_n_tokens, top_n_tokens_tensor=top_n_tokens_tensor, blocks=blocks, max_blocks=max_blocks, prefill_cache_indices=prefill_cache_indices, speculative_ids=None, ) class BaseFlashMistral(FlashCausalLM): def __init__( self, config_cls, model_cls, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = torch.float16 if dtype is None else dtype else: raise NotImplementedError("FlashMistral is only available on GPU") tokenizer = LlamaTokenizerFast.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) config = config_cls.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code ) config.quantize = quantize config.use_medusa = use_medusa # Set context windows if config.sliding_window is not None: set_sliding_window( config.sliding_window, math.ceil(config.sliding_window / BLOCK_SIZE) ) torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights(filenames, device, dtype, process_group=self.process_group) if config.quantize in ["gptq", "awq"]: weights._set_gptq_params(model_id, revision) model = model_cls(config, weights) self.cuda_graphs = {} torch.distributed.barrier(group=self.process_group) super(BaseFlashMistral, self).__init__( model=model, tokenizer=tokenizer, num_layers=len(model.model.layers), num_kv_heads=model.model.num_key_value_heads, head_size=model.model.head_size, dtype=dtype, device=device, rank=rank, world_size=world_size, sliding_window=config.sliding_window, ) @property def batch_type(self) -> Type[FlashMistralBatch]: return FlashMistralBatch def cuda_graph_warmup(self, bs: int, max_s: int, max_bt: int): input_ids = torch.zeros(bs, dtype=torch.int64, device=self.device) position_ids = torch.zeros(bs, dtype=torch.int32, device=self.device) slots = torch.arange(bs, dtype=torch.int32, device=self.device) input_lengths = torch.ones(bs, dtype=torch.int32, device=self.device) * max_s block_tables = ( 
torch.arange(max_bt, dtype=torch.int32, device=self.device) .repeat(bs) .reshape((bs, max_bt)) ) kv_cache = get_cache_manager().kv_cache self.cuda_graphs[bs] = { "input_ids": input_ids, "position_ids": position_ids, "kv_cache": kv_cache, "block_tables": block_tables, "slots": slots, "input_lengths": input_lengths, } graph = torch.cuda.CUDAGraph() self.cuda_graphs[bs]["graph"] = graph torch.cuda.synchronize() # Run once outside to warmup self.model.forward( input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=kv_cache, block_tables=block_tables, slots=slots, input_lengths=input_lengths, max_s=max_s, prefill_cache_indices=None, lm_head_indices=None, ) torch.cuda.synchronize() with torch.cuda.graph(graph, pool=MEM_POOL): logits, speculative_logits = self.model.forward( input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=None, kv_cache=kv_cache, block_tables=block_tables, slots=slots, input_lengths=input_lengths, max_s=max_s, prefill_cache_indices=None, lm_head_indices=None, ) self.cuda_graphs[bs]["logits"] = logits self.cuda_graphs[bs]["speculative_logits"] = speculative_logits torch.cuda.synchronize() def forward( self, batch: FlashMistralBatch ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # Model Forward if batch.speculative_ids is not None: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = get_cache_manager().kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices speculative_ids = batch.speculative_ids B, speculative_length = speculative_ids.shape new_length = speculative_length + 1 new_input_ids = torch.cat( [input_ids.unsqueeze(-1), speculative_ids], dim=1 ).reshape(-1) arange = torch.arange(new_length, device=position_ids.device).unsqueeze(0) arange_int = arange.to(dtype=torch.int32) new_position_ids = ( position_ids.unsqueeze(-1).expand(B, new_length) + arange ).view(-1) slots = (slots.unsqueeze(-1).expand(B, new_length) + arange_int).view(-1) input_lengths = ( input_lengths.unsqueeze(-1).expand(B, new_length) + arange_int ).view(-1) # Add Copy the block tables for all members block_tables = ( block_tables.unsqueeze(1) .expand(B, new_length, -1) .reshape(B * new_length, -1) .contiguous() ) max_s = max_s + speculative_length input_ids = new_input_ids position_ids = new_position_ids else: input_ids = batch.input_ids position_ids = batch.position_ids cu_seqlen_prefill = batch.cu_seqlen_prefill kv_cache = get_cache_manager().kv_cache block_tables = batch.block_tables_tensor slots = batch.slots[batch.slot_indices] input_lengths = batch.input_lengths_tensor max_s = batch.max_seqlen lm_head_indices = batch.prefill_head_indices if cu_seqlen_prefill is None and self.model.max_past is not None: # In decode, not prefill, we're actually overwriting the KV-cache # in a circular buffer mode. # This makes sure the max_s for the decode pass is correct. 
max_s = min(self.model.max_past, max_s) bs = input_ids.shape[0] padded_bs = bs if bs == 3: padded_bs = 4 elif 3 < bs <= 8: padded_bs = 8 elif bs > 8: padded_bs = (bs + 7) // 8 * 8 # Try to find an associated cuda graph cuda_graph = self.cuda_graphs.get(padded_bs, None) if cu_seqlen_prefill is not None or cuda_graph is None: logits, speculative_logits = self.model.forward( input_ids=input_ids, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, input_lengths=input_lengths, max_s=max_s, prefill_cache_indices=batch.prefill_cache_indices, lm_head_indices=lm_head_indices, ) if batch.prefill_cache_indices is not None: batch.prefill_cache_indices = None return logits, speculative_logits # Copy inputs to the static inputs of the cuda graph # Static inputs are potentially padded cuda_graph["input_ids"][: input_ids.shape[0]] = input_ids cuda_graph["position_ids"][: position_ids.shape[0]] = position_ids cuda_graph["block_tables"][ : block_tables.shape[0], : block_tables.shape[1] ] = block_tables cuda_graph["slots"].fill_(-1) cuda_graph["slots"][: slots.shape[0]] = slots cuda_graph["input_lengths"].zero_() cuda_graph["input_lengths"][: input_lengths.shape[0]] = input_lengths # Replay the graph cuda_graph["graph"].replay() # Slice output to the correct shape speculative_logits = ( cuda_graph["speculative_logits"][:bs] if cuda_graph["speculative_logits"] is not None else None ) logits = cuda_graph["logits"][:bs] return logits, speculative_logits class FlashMistral(BaseFlashMistral): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): super(FlashMistral, self).__init__( config_cls=MistralConfig, model_cls=FlashMistralForCausalLM, model_id=model_id, revision=revision, quantize=quantize, use_medusa=use_medusa, dtype=dtype, trust_remote_code=trust_remote_code, )
text-generation-inference/server/text_generation_server/models/flash_mistral.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/flash_mistral.py", "repo_id": "text-generation-inference", "token_count": 10224 }
210
import torch
import torch.distributed

from typing import Optional

from transformers import (
    AutoTokenizer,
    AutoConfig,
)

from text_generation_server.models.custom_modeling.opt_modeling import OPTForCausalLM
from text_generation_server.models import CausalLM
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)


class OPTSharded(CausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        use_medusa: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            device = torch.device("cpu")
            dtype = torch.float32 if dtype is None else dtype

        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

        config = AutoConfig.from_pretrained(
            model_id,
            revision=revision,
            trust_remote_code=trust_remote_code,
        )
        config.quantize = quantize
        config.use_medusa = use_medusa
        tokenizer.pad_token_id = config.pad_token_id

        torch.distributed.barrier(group=self.process_group)
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(
            filenames, device=device, dtype=dtype, process_group=self.process_group
        )
        if config.quantize == "gptq":
            weights._set_gptq_params(model_id, revision)

        model = OPTForCausalLM(config, weights)

        torch.distributed.barrier(group=self.process_group)
        super(CausalLM, self).__init__(
            model=model,
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )

    def forward(
        self, input_ids, attention_mask, position_ids, past_key_values: Optional = None
    ):
        outputs = self.model.forward(
            input_ids=input_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=True,
        )

        return outputs.logits, outputs.past_key_values
text-generation-inference/server/text_generation_server/models/opt.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/opt.py", "repo_id": "text-generation-inference", "token_count": 1210 }
211
# https://github.com/fpgaminer/GPTQ-triton """ Mostly the same as the autotuner in Triton, but with a few changes like using 40 runs instead of 100. """ import builtins import math import time from typing import Dict import triton class Autotuner(triton.KernelInterface): def __init__( self, fn, arg_names, configs, key, reset_to_zero, prune_configs_by: Dict = None, nearest_power_of_two: bool = False, ): """ :param prune_configs_by: a dict of functions that are used to prune configs, fields: 'perf_model': performance model used to predicate running time with different configs, returns running time 'top_k': number of configs to bench 'prune_num_stages_by'(optional): a function used to prune num_stages. It take configs:List[Config] as its input, and returns pruned configs. 'nearest_power_of_two'(optional): whether to round key arguments to the nearest power of two when caching tuning results """ if not configs: self.configs = [triton.Config({}, num_warps=4, num_stages=2)] else: self.configs = configs self.key_idx = [arg_names.index(k) for k in key] self.nearest_power_of_two = nearest_power_of_two self.cache = {} # hook to reset all required tensor to zeros before relaunching a kernel self.hook = lambda args: 0 if reset_to_zero is not None: self.reset_idx = [arg_names.index(k) for k in reset_to_zero] def _hook(args): for i in self.reset_idx: args[i].zero_() self.hook = _hook self.arg_names = arg_names # prune configs if prune_configs_by: perf_model, top_k = ( prune_configs_by["perf_model"], prune_configs_by["top_k"], ) if "early_config_prune" in prune_configs_by: early_config_prune = prune_configs_by["early_config_prune"] else: perf_model, top_k, early_config_prune = None, None, None self.perf_model, self.configs_top_k = perf_model, top_k self.early_config_prune = early_config_prune self.fn = fn def _bench(self, *args, config, **meta): # check for conflicts, i.e. meta-parameters both provided # as kwargs and by the autotuner conflicts = meta.keys() & config.kwargs.keys() if conflicts: raise ValueError( f"Conflicting meta-parameters: {', '.join(conflicts)}." " Make sure that you don't re-define auto-tuned symbols." 
) # augment meta-parameters with tunable ones current = dict(meta, **config.kwargs) def kernel_call(): if config.pre_hook: config.pre_hook(self.nargs) self.hook(args) self.fn.run( *args, num_warps=config.num_warps, num_stages=config.num_stages, **current, ) try: # In testings using only 40 reps seems to be close enough and it appears to be what PyTorch uses # PyTorch also sets fast_flush to True, but I didn't see any speedup so I'll leave the default return triton.testing.do_bench( kernel_call, quantiles=(0.5, 0.2, 0.8), rep=40 ) except triton.OutOfResources: return (float("inf"), float("inf"), float("inf")) def run(self, *args, **kwargs): self.nargs = dict(zip(self.arg_names, args)) if len(self.configs) > 1: key = tuple(args[i] for i in self.key_idx) # This reduces the amount of autotuning by rounding the keys to the nearest power of two # In my testing this gives decent results, and greatly reduces the amount of tuning required if self.nearest_power_of_two: key = tuple([2 ** int(math.log2(x) + 0.5) for x in key]) if key not in self.cache: # prune configs pruned_configs = self.prune_configs(kwargs) bench_start = time.time() timings = { config: self._bench(*args, config=config, **kwargs) for config in pruned_configs } bench_end = time.time() self.bench_time = bench_end - bench_start self.cache[key] = builtins.min(timings, key=timings.get) self.hook(args) self.configs_timings = timings config = self.cache[key] else: config = self.configs[0] self.best_config = config if config.pre_hook is not None: config.pre_hook(self.nargs) return self.fn.run( *args, num_warps=config.num_warps, num_stages=config.num_stages, **kwargs, **config.kwargs, ) def prune_configs(self, kwargs): pruned_configs = self.configs if self.early_config_prune: pruned_configs = self.early_config_prune(self.configs, self.nargs) if self.perf_model: top_k = self.configs_top_k if isinstance(top_k, float) and top_k <= 1.0: top_k = int(len(self.configs) * top_k) if len(pruned_configs) > top_k: est_timing = { config: self.perf_model( **self.nargs, **kwargs, **config.kwargs, num_stages=config.num_stages, num_warps=config.num_warps, ) for config in pruned_configs } pruned_configs = sorted(est_timing.keys(), key=lambda x: est_timing[x])[ :top_k ] return pruned_configs def warmup(self, *args, **kwargs): self.nargs = dict(zip(self.arg_names, args)) for config in self.prune_configs(kwargs): self.fn.warmup( *args, num_warps=config.num_warps, num_stages=config.num_stages, **kwargs, **config.kwargs, ) self.nargs = None def autotune( configs, key, prune_configs_by=None, reset_to_zero=None, nearest_power_of_two=False ): """ Decorator for auto-tuning a :code:`triton.jit`'d function. .. highlight:: python .. code-block:: python @triton.autotune(configs=[ triton.Config(meta={'BLOCK_SIZE': 128}, num_warps=4), triton.Config(meta={'BLOCK_SIZE': 1024}, num_warps=8), ], key=['x_size'] # the two above configs will be evaluated anytime # the value of x_size changes ) @triton.jit def kernel(x_ptr, x_size, **META): BLOCK_SIZE = META['BLOCK_SIZE'] :note: When all the configurations are evaluated, the kernel will run multiple time. This means that whatever value the kernel updates will be updated multiple times. To avoid this undesired behavior, you can use the `reset_to_zero` argument, which reset the value of the provided tensor to `zero` before running any configuration. 
:param configs: a list of :code:`triton.Config` objects :type configs: list[triton.Config] :param key: a list of argument names whose change in value will trigger the evaluation of all provided configs. :type key: list[str] :param prune_configs_by: a dict of functions that are used to prune configs, fields: 'perf_model': performance model used to predict running time with different configs, returns running time 'top_k': number of configs to bench 'early_config_prune'(optional): a function used to do early pruning (e.g. on num_stages). It takes configs: List[Config] as its input, and returns pruned configs. :param reset_to_zero: a list of argument names whose value will be reset to zero before evaluating any configs. :type reset_to_zero: list[str] """ def decorator(fn): return Autotuner( fn, fn.arg_names, configs, key, reset_to_zero, prune_configs_by, nearest_power_of_two, ) return decorator def matmul248_kernel_config_pruner(configs, nargs): """ The main purpose of this function is to shrink BLOCK_SIZE_* when the corresponding dimension is smaller. """ m = max(2 ** int(math.ceil(math.log2(nargs["M"]))), 16) n = max(2 ** int(math.ceil(math.log2(nargs["N"]))), 16) k = max(2 ** int(math.ceil(math.log2(nargs["K"]))), 16) used = set() for config in configs: block_size_m = min(m, config.kwargs["BLOCK_SIZE_M"]) block_size_n = min(n, config.kwargs["BLOCK_SIZE_N"]) block_size_k = min(k, config.kwargs["BLOCK_SIZE_K"]) group_size_m = config.kwargs["GROUP_SIZE_M"] if ( block_size_m, block_size_n, block_size_k, group_size_m, config.num_stages, config.num_warps, ) in used: continue used.add( ( block_size_m, block_size_n, block_size_k, group_size_m, config.num_stages, config.num_warps, ) ) yield triton.Config( { "BLOCK_SIZE_M": block_size_m, "BLOCK_SIZE_N": block_size_n, "BLOCK_SIZE_K": block_size_k, "GROUP_SIZE_M": group_size_m, }, num_stages=config.num_stages, num_warps=config.num_warps, )
text-generation-inference/server/text_generation_server/utils/gptq/custom_autotune.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/gptq/custom_autotune.py", "repo_id": "text-generation-inference", "token_count": 5116 }
212
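The record above ends the autotuner module. The sketch below is not part of the dumped file: it shows how the `autotune` decorator and `matmul248_kernel_config_pruner` defined there are typically attached to a Triton kernel. The kernel body and the two block-size configurations are illustrative assumptions; the import path follows the file path recorded in the metadata.

```python
import triton
import triton.language as tl

# Assumed import path, taken from the file_path of this record.
from text_generation_server.utils.gptq.custom_autotune import (
    autotune,
    matmul248_kernel_config_pruner,
)


@autotune(
    configs=[
        # Two illustrative configurations; a real kernel ships many more.
        triton.Config({"BLOCK_SIZE_M": 64, "BLOCK_SIZE_N": 64, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8}, num_stages=4, num_warps=4),
        triton.Config({"BLOCK_SIZE_M": 128, "BLOCK_SIZE_N": 64, "BLOCK_SIZE_K": 32, "GROUP_SIZE_M": 8}, num_stages=2, num_warps=8),
    ],
    key=["M", "N", "K"],          # re-benchmark whenever any of these arguments change
    nearest_power_of_two=True,    # round the key so tuning reruns less often
    prune_configs_by={
        "early_config_prune": matmul248_kernel_config_pruner,
        "perf_model": None,
        "top_k": None,
    },
)
@triton.jit
def toy_matmul_kernel(a_ptr, b_ptr, c_ptr, M, N, K,
                      BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr,
                      BLOCK_SIZE_K: tl.constexpr, GROUP_SIZE_M: tl.constexpr):
    pid = tl.program_id(axis=0)  # body elided; only the decorator wiring matters here
```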
import subprocess import argparse def main(): parser = argparse.ArgumentParser() parser.add_argument("--check", action="store_true") args = parser.parse_args() output = subprocess.check_output(["text-generation-launcher", "--help"]).decode( "utf-8" ) wrap_code_blocks_flag = "<!-- WRAP CODE BLOCKS -->" final_doc = f"# Text-generation-launcher arguments\n\n{wrap_code_blocks_flag}\n\n" lines = output.split("\n") header = "" block = [] for line in lines: if line.startswith(" -") or line.startswith(" -"): rendered_block = "\n".join(block) if header: final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n" else: final_doc += f"```shell\n{rendered_block}\n```\n" block = [] tokens = line.split("<") if len(tokens) > 1: header = tokens[-1][:-1] else: header = line.split("--")[-1] header = header.upper().replace("-", "_") block.append(line) rendered_block = "\n".join(block) final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n" block = [] filename = "docs/source/basic_tutorials/launcher.md" if args.check: with open(filename, "r") as f: doc = f.read() if doc != final_doc: tmp = "launcher.md" with open(tmp, "w") as g: g.write(final_doc) diff = subprocess.run( ["diff", tmp, filename], capture_output=True ).stdout.decode("utf-8") print(diff) raise Exception( "Doc is not up-to-date, run `python update_doc.py` in order to update it" ) else: with open(filename, "w") as f: f.write(final_doc) if __name__ == "__main__": main()
text-generation-inference/update_doc.py/0
{ "file_path": "text-generation-inference/update_doc.py", "repo_id": "text-generation-inference", "token_count": 991 }
213
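To make the output of the script above concrete, here is a condensed, illustrative re-implementation of its header-extraction rule applied to a tiny, fabricated slice of `text-generation-launcher --help` output. It is not part of `update_doc.py`; the option names and descriptions are placeholders used only to show the shape of the generated markdown.

```python
FENCE = "`" * 3  # shell code-fence marker, built here to keep this example self-contained

SAMPLE_HELP = "\n".join(
    [
        "      --model-id <MODEL_ID>",
        "          The name of the model to load",
        "      --port <PORT>",
        "          The port to listen on",
    ]
)


def render(help_text: str) -> str:
    doc, header, block = "", "", []
    for line in help_text.split("\n"):
        if line.lstrip().startswith("--"):
            if block:  # flush the previous option into its own section
                doc += f"## {header}\n{FENCE}shell\n" + "\n".join(block) + f"\n{FENCE}\n"
                block = []
            # Derive the section title from the <ENV_VAR> placeholder, as the script does.
            header = line.split("<")[-1].rstrip(">").upper().replace("-", "_")
        block.append(line)
    doc += f"## {header}\n{FENCE}shell\n" + "\n".join(block) + f"\n{FENCE}\n"
    return doc


print(render(SAMPLE_HELP))  # produces a "## MODEL_ID" and a "## PORT" section
```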
extern crate napi_build; fn main() { napi_build::setup(); }
tokenizers/bindings/node/build.rs/0
{ "file_path": "tokenizers/bindings/node/build.rs", "repo_id": "tokenizers", "token_count": 26 }
214
// import { promisify } from 'util' import { BPE, Tokenizer, mergeEncodings, slice } from '../../' describe('slice', () => { const text = 'My name is John 👋' const sliceText = slice.bind({}, text) it('returns the full text when no params', () => { const sliced = sliceText() expect(sliced).toEqual(text) }) it('accepts `undefined` as second parameter', () => { const original = sliceText(undefined) expect(original).toEqual(text) }) it('accepts `undefined` as third parameter', () => { const original = sliceText(0, undefined) expect(original).toEqual(text) }) it('throws an error when `begin` is out of range', () => { expect(() => sliceText(1000)).toThrow() }) it('returns slice starting at the specified index', () => { const original = sliceText(3) expect(original).toEqual('name is John 👋') }) it('throws an error when `end` is out of range', () => { expect(() => sliceText(0, 1000)).toThrow() }) it('returns the text between the two specified indexes', () => { const original = sliceText(3, 7) expect(original).toEqual('name') }) describe('with only a negative `begin`', () => { it('returns the original string counting from the end when in the range', () => { const original = sliceText(-1) expect(original).toEqual('👋') }) it('throws an error when out of range', () => { expect(() => sliceText(-1000)).toThrow() }) }) describe('with a positive `begin` and a negative `end`', () => { it('returns correct slice when resulting range is valid', () => { const original = sliceText(3, -7) expect(original).toEqual('name is') }) it('throws an error when resulting `end` index is lower than `begin`', () => { expect(() => sliceText(7, -12)).toThrow() }) it('throws an error when `begin` is out of range', () => { expect(() => sliceText(1000, -12)).toThrow() }) it('throws an error when resulting `end` index is out of range', () => { expect(() => sliceText(7, -1000)).toThrow() }) }) describe('with a negative `begin` and a positive `end`', () => { it('returns correct slice when resulting range is valid', () => { const original = sliceText(-9, 10) expect(original).toEqual('is') }) it('throws an error when resulting `begin` index is upper than `end`', () => { expect(() => sliceText(-3, 5)).toThrow() }) it('throws an error when `end` is out of range', () => { expect(() => sliceText(-5, 1000)).toThrow() }) it('throws an error when resulting `begin` index is out of range', () => { expect(() => sliceText(-1000, 10)).toThrow() }) }) describe('with negatives `begin` and `end`', () => { it('returns correct slice when resulting range is valid', () => { const original = sliceText(-9, -7) expect(original).toEqual('is') }) it('throws an error when resulting `end` index is lower than `begin`', () => { expect(() => sliceText(-5, -10)).toThrow() }) it('throws an error when resulting `begin` index is out of range', () => { expect(() => sliceText(-1000, -10)).toThrow() }) it('throws an error when resulting `end` index is out of range', () => { expect(() => sliceText(-10, -1000)).toThrow() }) }) }) describe('mergeEncodings', () => { const model = BPE.empty() const tokenizer = new Tokenizer(model) tokenizer.addTokens(['my', 'name', 'is', 'john']) it('accepts `undefined` as a second parameter', () => { const encoding = mergeEncodings([], undefined) expect(encoding.constructor.name).toEqual('Encoding') }) it('returns correct result with `growingOffsets` not provided', async () => { const firstEncoding = await tokenizer.encode('my name is', null) const secondEncoding = await tokenizer.encode('john', null) const encoding = 
mergeEncodings([firstEncoding, secondEncoding]) expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john']) expect(encoding.getOffsets()).toEqual([ [0, 2], [3, 7], [8, 10], [0, 4], ]) }) it('returns correct result when `growingOffsets` is `false`', async () => { const firstEncoding = await tokenizer.encode('my name is', null) const secondEncoding = await tokenizer.encode('john', null) const encoding = mergeEncodings([firstEncoding, secondEncoding], false) expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john']) expect(encoding.getOffsets()).toEqual([ [0, 2], [3, 7], [8, 10], [0, 4], ]) }) it('returns correct result when `growingOffsets` is `true`', async () => { const firstEncoding = await tokenizer.encode('my name is', null) const secondEncoding = await tokenizer.encode('john', null) const encoding = mergeEncodings([firstEncoding, secondEncoding], true) expect(encoding.getTokens()).toEqual(['my', 'name', 'is', 'john']) expect(encoding.getOffsets()).toEqual([ [0, 2], [3, 7], [8, 10], [10, 14], ]) }) })
tokenizers/bindings/node/lib/bindings/utils.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/utils.test.ts", "repo_id": "tokenizers", "token_count": 1866 }
215
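For readers following along in Python rather than Node, here is a rough equivalent of the `mergeEncodings` test above. It assumes the Python bindings expose the analogous `Encoding.merge` static method with a `growing_offsets` flag; treat the exact signature as an assumption rather than verified API.

```python
from tokenizers import Encoding, Tokenizer
from tokenizers.models import BPE

tokenizer = Tokenizer(BPE())  # empty model, tokens come only from the added vocabulary
tokenizer.add_tokens(["my", "name", "is", "john"])

first = tokenizer.encode("my name is")
second = tokenizer.encode("john")

merged = Encoding.merge([first, second], growing_offsets=True)
print(merged.tokens)   # expected: ['my', 'name', 'is', 'john']
print(merged.offsets)  # with growing offsets, 'john' should start after 'my name is'
```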
{ "name": "tokenizers-linux-arm64-musl", "version": "0.13.4-rc1", "os": [ "linux" ], "cpu": [ "arm64" ], "main": "tokenizers.linux-arm64-musl.node", "files": [ "tokenizers.linux-arm64-musl.node" ], "description": "Tokenizers platform specific bindings", "keywords": [ "napi-rs", "NAPI", "N-API", "Rust", "node-addon", "node-addon-api" ], "license": "MIT", "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "repository": "tokenizers", "libc": [ "musl" ] }
tokenizers/bindings/node/npm/linux-arm64-musl/package.json/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm64-musl/package.json", "repo_id": "tokenizers", "token_count": 291 }
216
#![deny(clippy::all)] pub const VERSION: &str = env!("CARGO_PKG_VERSION"); mod arc_rwlock_serde; pub mod decoders; pub mod encoding; pub mod models; pub mod normalizers; pub mod pre_tokenizers; pub mod processors; pub mod tasks; pub mod tokenizer; pub mod trainers; pub mod utils;
tokenizers/bindings/node/src/lib.rs/0
{ "file_path": "tokenizers/bindings/node/src/lib.rs", "repo_id": "tokenizers", "token_count": 102 }
217
# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.13.2] - [#1096] Python 3.11 support ## [0.13.1] - [#1072] Fixing Roberta type ids. ## [0.13.0] - [#956] PyO3 version upgrade - [#1055] M1 automated builds - [#1008] `Decoder` is now a composable trait, but without being backward incompatible - [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible Both trait changes warrant a "major" number since, despite best efforts to not break backward compatibility, the code is different enough that we cannot be exactly sure. ## [0.12.1] - [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520 ## [0.12.0] YANKED Bump minor version because of a breaking change. - [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free. - [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience) - [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens) - [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking. - [#962] Fix tests for python 3.10 - [#961] Added link for Ruby port of `tokenizers` ## [0.11.6] - [#919] Fixing single_word AddedToken. (regression from 0.11.2) - [#916] Deserializing faster `added_tokens` by loading them in batch. ## [0.11.5] - [#895] Build `python 3.10` wheels. ## [0.11.4] - [#884] Fixing bad deserialization following inclusion of a default for Punctuation ## [0.11.3] - [#882] Fixing Punctuation deserialize without argument. - [#868] Fixing missing direction in TruncationParams - [#860] Adding TruncationSide to TruncationParams ## [0.11.0] ### Fixed - [#585] Conda version should now work on old CentOS - [#844] Fixing interaction between `is_pretokenized` and `trim_offsets`. - [#851] Doc links ### Added - [#657]: Add SplitDelimiterBehavior customization to Punctuation constructor - [#845]: Documentation for `Decoders`. 
### Changed - [#850]: Added a feature gate to enable disabling `http` features - [#718]: Fix `WordLevel` tokenizer determinism during training - [#762]: Add a way to specify the unknown token in `SentencePieceUnigramTokenizer` - [#770]: Improved documentation for `UnigramTrainer` - [#780]: Add `Tokenizer.from_pretrained` to load tokenizers from the Hugging Face Hub - [#793]: Saving a pretty JSON file by default when saving a tokenizer ## [0.10.3] ### Fixed - [#686]: Fix SPM conversion process for whitespace deduplication - [#707]: Fix stripping strings containing Unicode characters ### Added - [#693]: Add a CTC Decoder for Wave2Vec models ### Removed - [#714]: Removed support for Python 3.5 ## [0.10.2] ### Fixed - [#652]: Fix offsets for `Precompiled` corner case - [#656]: Fix BPE `continuing_subword_prefix` - [#674]: Fix `Metaspace` serialization problems ## [0.10.1] ### Fixed - [#616]: Fix SentencePiece tokenizers conversion - [#617]: Fix offsets produced by Precompiled Normalizer (used by tokenizers converted from SPM) - [#618]: Fix Normalizer.normalize with `PyNormalizedStringRefMut` - [#620]: Fix serialization/deserialization for overlapping models - [#621]: Fix `ByteLevel` instantiation from a previously saved state (using `__getstate__()`) ## [0.10.0] ### Added - [#508]: Add a Visualizer for notebooks to help understand how the tokenizers work - [#519]: Add a `WordLevelTrainer` used to train a `WordLevel` model - [#533]: Add support for conda builds - [#542]: Add Split pre-tokenizer to easily split using a pattern - [#544]: Ability to train from memory. This also improves the integration with `datasets` - [#590]: Add getters/setters for components on BaseTokenizer - [#574]: Add `fust_unk` option to SentencePieceBPETokenizer ### Changed - [#509]: Automatically stubbing the `.pyi` files - [#519]: Each `Model` can return its associated `Trainer` with `get_trainer()` - [#530]: The various attributes on each component can be get/set (ie. `tokenizer.model.dropout = 0.1`) - [#538]: The API Reference has been improved and is now up-to-date. ### Fixed - [#519]: During training, the `Model` is now trained in-place. This fixes several bugs that were forcing to reload the `Model` after a training. - [#539]: Fix `BaseTokenizer` enable_truncation docstring ## [0.9.4] ### Fixed - [#492]: Fix `from_file` on `BertWordPieceTokenizer` - [#498]: Fix the link to download `sentencepiece_model_pb2.py` - [#500]: Fix a typo in the docs quicktour ### Changed - [#506]: Improve Encoding mappings for pairs of sequence ## [0.9.3] ### Fixed - [#470]: Fix hanging error when training with custom component - [#476]: TemplateProcessing serialization is now deterministic - [#481]: Fix SentencePieceBPETokenizer.from_files ### Added - [#477]: UnicodeScripts PreTokenizer to avoid merges between various scripts - [#480]: Unigram now accepts an `initial_alphabet` and handles `special_tokens` correctly ## [0.9.2] ### Fixed - [#464]: Fix a problem with RobertaProcessing being deserialized as BertProcessing ## [0.9.1] ### Fixed - [#459]: Fix a problem with deserialization ## [0.9.0] ### Fixed - [#362]: Fix training deadlock with Python components. 
- [#363]: Fix a crash when calling `.train` with some non-existent files - [#355]: Remove a lot of possible crashes - [#389]: Improve truncation (crash and consistency) ### Added - [#379]: Add the ability to call `encode`/`encode_batch` with numpy arrays - [#292]: Support for the Unigram algorithm - [#378], [#394], [#416], [#417]: Many new Normalizer and PreTokenizer - [#403]: Add `TemplateProcessing` `PostProcessor`. - [#420]: Ability to fuse the "unk" token in BPE. ### Changed - [#360]: Lots of improvements related to words/alignment tracking - [#426]: Improvements on error messages thanks to PyO3 0.12 ## [0.8.1] ### Fixed - [#333]: Fix deserialization of `AddedToken`, where the content was not restored properly ### Changed - [#329]: Improved warning and behavior when we detect a fork - [#330]: BertNormalizer now keeps the same behavior than the original implementation when `strip_accents` is not specified. ## [0.8.0] ### Highlights of this release - We can now encode both pre-tokenized inputs, and raw strings. This is especially usefull when processing datasets that are already pre-tokenized like for NER (Name Entity Recognition), and helps while applying labels to each word. - Full tokenizer serialization. It is now easy to save a tokenizer to a single JSON file, to later load it back with just one line of code. That's what sharing a Tokenizer means now: 1 line of code. - With the serialization comes the compatibility with `Pickle`! The Tokenizer, all of its components, Encodings, everything can be pickled! - Training a tokenizer is now even faster (up to 5-10x) than before! - Compatibility with `multiprocessing`, even when using the `fork` start method. Since this library makes heavy use of the multithreading capacities of our computers to allows a very fast tokenization, this led to problems (deadlocks) when used with `multiprocessing`. This version now allows to disable the parallelism, and will warn you if this is necessary. - And a lot of other improvements, and fixes. ### Fixed - [#286]: Fix various crash when training a BPE model - [#309]: Fixed a few bugs related to additional vocabulary/tokens ### Added - [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...). This adds some methods to easily save/load an entire tokenizer (`from_str`, `from_file`). - [#273]: `Tokenizer` and its parts are now pickable - [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure activation of the Tensor Cores, while ensuring padding to a multiple of 8. Use with `enable_padding(pad_to_multiple_of=8)` for example. - [#298]: Ability to get the currently set truncation/padding params - [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment variable. This is especially usefull when using `multiprocessing` capabilities, with the `fork` start method, which happens to be the default on Linux systems. Without disabling the parallelism, the process dead-locks while encoding. (Cf [#187] for more information) ### Changed - Improved errors generated during truncation: When the provided max length is too low are now handled properly. - [#249] `encode` and `encode_batch` now accept pre-tokenized inputs. When the input is pre-tokenized, the argument `is_pretokenized=True` must be specified. 
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the processing of each file - [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original implementation from GPT-2 - [#309]: Improved the management of the additional vocabulary. This introduces an option `normalized`, controlling whether a token should be extracted from the normalized version of the input text. ## [0.7.0] ### Changed - Only one progress bar while reading files during training. This is better for use-cases with a high number of files as it avoids having too many progress bars on screen. Also avoids reading the size of each file before starting to actually read these files, as this process could take really long. - [#193]: `encode` and `encode_batch` now take a new optional argument, specifying whether we should add the special tokens. This is activated by default. - [#197]: `original_str` and `normalized_str` have been removed from the `Encoding` returned by `encode` and `encode_batch`. This brings a reduction of 70% of the memory footprint. - [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the normalized one anymore. - The added token given to `add_special_tokens` or `add_tokens` on a `Tokenizer`, or while using `train(special_tokens=...)` can now be instances of `AddedToken` to provide more control over these tokens. - [#136]: Updated Pyo3 version - [#136]: Static methods `Model.from_files` and `Model.empty` are removed in favor of using constructors. - [#239]: `CharBPETokenizer` now corresponds to OpenAI GPT BPE implementation by default. ### Added - [#188]: `ByteLevel` is also a `PostProcessor` now and handles trimming the offsets if activated. This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are part of the actual token. It has been added to `ByteLevelBPETokenizer` but it is off by default (`trim_offsets=False`). - [#236]: `RobertaProcessing` also handles trimming the offsets. - [#234]: New alignment mappings on the `Encoding`. Provide methods to easily convert between `char` or `word` (input space) and `token` (output space). - `post_process` can be called on the `Tokenizer` - [#208]: Ability to retrieve the vocabulary from the `Tokenizer` with `get_vocab(with_added_tokens: bool)` - [#136] Models can now be instantiated through object constructors. ### Fixed - [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE: - when `add_prefix_space=True` - [#156]: when a Unicode character gets split-up in multiple byte-level characters - Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded. - [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if not advised, but that's not the question). - [#205]: Trim the decoded string in `BPEDecoder` used by `CharBPETokenizer` ### How to migrate - Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. If you are using `ByteLevelBPETokenizer`, this option is disabled by default (`trim_offsets=False`). - `BertWordPieceTokenizer` option to `add_special_tokens` must now be given to `encode` or `encode_batch` - Access to the `original_str` on the `Encoding` has been removed. The original string is the input of `encode` so it didn't make sense to keep it here. - No need to call `original_str.offsets(offsets[N])` to convert offsets to the original string. 
They are now relative to the original string by default. - Access to the `normalized_str` on the `Encoding` has been removed. Can be retrieved by calling `normalize(sequence)` on the `Tokenizer` - Change `Model.from_files` and `Model.empty` to use constructor. The model constructor should take the same arguments as the old methods. (ie `BPE(vocab, merges)` or `BPE()`) - If you were using the `CharBPETokenizer` and want to keep the same behavior as before, set `bert_normalizer=False` and `split_on_whitespace_only=True`. ## [0.6.0] ### Changed - [#165]: Big improvements in speed for BPE (Both training and tokenization) ### Fixed - [#160]: Some default tokens were missing from `BertWordPieceTokenizer` - [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got split up in multiple bytes. - [#174]: The `longest_first` truncation strategy had a bug ## [0.5.2] - [#163]: Do not open all files directly while training ### Fixed - We introduced a bug related to the saving of the WordPiece model in 0.5.1: The `vocab.txt` file was named `vocab.json`. This is now fixed. - The `WordLevel` model was also saving its vocabulary to the wrong format. ## [0.5.1] ### Changed - `name` argument is now optional when saving a `Model`'s vocabulary. When the name is not specified, the files get a more generic naming, like `vocab.json` or `merges.txt`. ## [0.5.0] ### Changed - [#145]: `BertWordPieceTokenizer` now cleans up some tokenization artifacts while decoding - [#149]: `ByteLevelBPETokenizer` now has `dropout`. - `do_lowercase` has been changed to `lowercase` for consistency between the different tokenizers. (Especially `ByteLevelBPETokenizer` and `CharBPETokenizer`) - [#139]: Expose `__len__` on `Encoding` - Improved padding performances. ### Added - Added a new `Strip` normalizer ### Fixed - [#145]: Decoding was buggy on `BertWordPieceTokenizer`. - [#152]: Some documentation and examples were still using the old `BPETokenizer` ### How to migrate - Use `lowercase` when initializing `ByteLevelBPETokenizer` or `CharBPETokenizer` instead of `do_lowercase`. ## [0.4.2] ### Fixed - [#137]: Fix a bug in the class `WordPieceTrainer` that prevented `BertWordPieceTokenizer` from being trained. ## [0.4.1] ### Fixed - [#134]: Fix a bug related to the punctuation in BertWordPieceTokenizer ## [0.4.0] ### Changed - [#131]: Replaced all .new() class methods by a proper __new__ implementation - Improved typings ### How to migrate - Remove all `.new` on all classe instanciations ## [0.3.0] ### Changed - BPETokenizer has been renamed to CharBPETokenizer for clarity. - Improve truncation/padding and the handling of overflowing tokens. Now when a sequence gets truncated, we provide a list of overflowing `Encoding` that are ready to be processed by a language model, just as the main `Encoding`. - Provide mapping to the original string offsets using: ``` output = tokenizer.encode(...) print(output.original_str.offsets(output.offsets[3])) ``` - [#99]: Exposed the vocabulary size on all tokenizers ### Added - Added `CharDelimiterSplit`: a new `PreTokenizer` that allows splitting sequences on the given delimiter (Works like `.split(delimiter)`) - Added `WordLevel`: a new model that simply maps `tokens` to their `ids`. ### Fixed - Fix a bug with IndexableString - Fix a bug with truncation ### How to migrate - Rename `BPETokenizer` to `CharBPETokenizer` - `Encoding.overflowing` is now a List instead of a `Optional[Encoding]` ## [0.2.1] ### Fixed - Fix a bug with the IDs associated with added tokens. 
- Fix a bug that was causing crashes in Python 3.5 [#1096]: https://github.com/huggingface/tokenizers/pull/1096 [#1072]: https://github.com/huggingface/tokenizers/pull/1072 [#956]: https://github.com/huggingface/tokenizers/pull/956 [#1008]: https://github.com/huggingface/tokenizers/pull/1008 [#1009]: https://github.com/huggingface/tokenizers/pull/1009 [#1047]: https://github.com/huggingface/tokenizers/pull/1047 [#1055]: https://github.com/huggingface/tokenizers/pull/1055 [#1051]: https://github.com/huggingface/tokenizers/pull/1051 [#1052]: https://github.com/huggingface/tokenizers/pull/1052 [#938]: https://github.com/huggingface/tokenizers/pull/938 [#939]: https://github.com/huggingface/tokenizers/pull/939 [#952]: https://github.com/huggingface/tokenizers/pull/952 [#954]: https://github.com/huggingface/tokenizers/pull/954 [#962]: https://github.com/huggingface/tokenizers/pull/962 [#961]: https://github.com/huggingface/tokenizers/pull/961 [#960]: https://github.com/huggingface/tokenizers/pull/960 [#919]: https://github.com/huggingface/tokenizers/pull/919 [#916]: https://github.com/huggingface/tokenizers/pull/916 [#895]: https://github.com/huggingface/tokenizers/pull/895 [#884]: https://github.com/huggingface/tokenizers/pull/884 [#882]: https://github.com/huggingface/tokenizers/pull/882 [#868]: https://github.com/huggingface/tokenizers/pull/868 [#860]: https://github.com/huggingface/tokenizers/pull/860 [#850]: https://github.com/huggingface/tokenizers/pull/850 [#844]: https://github.com/huggingface/tokenizers/pull/844 [#845]: https://github.com/huggingface/tokenizers/pull/845 [#851]: https://github.com/huggingface/tokenizers/pull/851 [#585]: https://github.com/huggingface/tokenizers/pull/585 [#793]: https://github.com/huggingface/tokenizers/pull/793 [#780]: https://github.com/huggingface/tokenizers/pull/780 [#770]: https://github.com/huggingface/tokenizers/pull/770 [#762]: https://github.com/huggingface/tokenizers/pull/762 [#718]: https://github.com/huggingface/tokenizers/pull/718 [#714]: https://github.com/huggingface/tokenizers/pull/714 [#707]: https://github.com/huggingface/tokenizers/pull/707 [#693]: https://github.com/huggingface/tokenizers/pull/693 [#686]: https://github.com/huggingface/tokenizers/pull/686 [#674]: https://github.com/huggingface/tokenizers/pull/674 [#657]: https://github.com/huggingface/tokenizers/pull/657 [#656]: https://github.com/huggingface/tokenizers/pull/656 [#652]: https://github.com/huggingface/tokenizers/pull/652 [#621]: https://github.com/huggingface/tokenizers/pull/621 [#620]: https://github.com/huggingface/tokenizers/pull/620 [#618]: https://github.com/huggingface/tokenizers/pull/618 [#617]: https://github.com/huggingface/tokenizers/pull/617 [#616]: https://github.com/huggingface/tokenizers/pull/616 [#590]: https://github.com/huggingface/tokenizers/pull/590 [#574]: https://github.com/huggingface/tokenizers/pull/574 [#544]: https://github.com/huggingface/tokenizers/pull/544 [#542]: https://github.com/huggingface/tokenizers/pull/542 [#539]: https://github.com/huggingface/tokenizers/pull/539 [#538]: https://github.com/huggingface/tokenizers/pull/538 [#533]: https://github.com/huggingface/tokenizers/pull/533 [#530]: https://github.com/huggingface/tokenizers/pull/530 [#519]: https://github.com/huggingface/tokenizers/pull/519 [#509]: https://github.com/huggingface/tokenizers/pull/509 [#508]: https://github.com/huggingface/tokenizers/pull/508 [#506]: https://github.com/huggingface/tokenizers/pull/506 [#500]: https://github.com/huggingface/tokenizers/pull/500 
[#498]: https://github.com/huggingface/tokenizers/pull/498 [#492]: https://github.com/huggingface/tokenizers/pull/492 [#481]: https://github.com/huggingface/tokenizers/pull/481 [#480]: https://github.com/huggingface/tokenizers/pull/480 [#477]: https://github.com/huggingface/tokenizers/pull/477 [#476]: https://github.com/huggingface/tokenizers/pull/476 [#470]: https://github.com/huggingface/tokenizers/pull/470 [#464]: https://github.com/huggingface/tokenizers/pull/464 [#459]: https://github.com/huggingface/tokenizers/pull/459 [#420]: https://github.com/huggingface/tokenizers/pull/420 [#417]: https://github.com/huggingface/tokenizers/pull/417 [#416]: https://github.com/huggingface/tokenizers/pull/416 [#403]: https://github.com/huggingface/tokenizers/pull/403 [#394]: https://github.com/huggingface/tokenizers/pull/394 [#389]: https://github.com/huggingface/tokenizers/pull/389 [#379]: https://github.com/huggingface/tokenizers/pull/379 [#378]: https://github.com/huggingface/tokenizers/pull/378 [#363]: https://github.com/huggingface/tokenizers/pull/363 [#362]: https://github.com/huggingface/tokenizers/pull/362 [#360]: https://github.com/huggingface/tokenizers/pull/360 [#355]: https://github.com/huggingface/tokenizers/pull/355 [#333]: https://github.com/huggingface/tokenizers/pull/333 [#330]: https://github.com/huggingface/tokenizers/pull/330 [#329]: https://github.com/huggingface/tokenizers/pull/329 [#311]: https://github.com/huggingface/tokenizers/pull/311 [#309]: https://github.com/huggingface/tokenizers/pull/309 [#292]: https://github.com/huggingface/tokenizers/pull/292 [#289]: https://github.com/huggingface/tokenizers/pull/289 [#286]: https://github.com/huggingface/tokenizers/pull/286 [#280]: https://github.com/huggingface/tokenizers/pull/280 [#276]: https://github.com/huggingface/tokenizers/pull/276 [#273]: https://github.com/huggingface/tokenizers/pull/273 [#272]: https://github.com/huggingface/tokenizers/pull/272 [#249]: https://github.com/huggingface/tokenizers/pull/249 [#239]: https://github.com/huggingface/tokenizers/pull/239 [#236]: https://github.com/huggingface/tokenizers/pull/236 [#234]: https://github.com/huggingface/tokenizers/pull/234 [#208]: https://github.com/huggingface/tokenizers/pull/208 [#205]: https://github.com/huggingface/tokenizers/issues/205 [#197]: https://github.com/huggingface/tokenizers/pull/197 [#193]: https://github.com/huggingface/tokenizers/pull/193 [#190]: https://github.com/huggingface/tokenizers/pull/190 [#188]: https://github.com/huggingface/tokenizers/pull/188 [#187]: https://github.com/huggingface/tokenizers/issues/187 [#175]: https://github.com/huggingface/tokenizers/issues/175 [#174]: https://github.com/huggingface/tokenizers/issues/174 [#165]: https://github.com/huggingface/tokenizers/pull/165 [#163]: https://github.com/huggingface/tokenizers/issues/163 [#160]: https://github.com/huggingface/tokenizers/issues/160 [#156]: https://github.com/huggingface/tokenizers/pull/156 [#152]: https://github.com/huggingface/tokenizers/issues/152 [#149]: https://github.com/huggingface/tokenizers/issues/149 [#145]: https://github.com/huggingface/tokenizers/issues/145 [#139]: https://github.com/huggingface/tokenizers/issues/139 [#137]: https://github.com/huggingface/tokenizers/issues/137 [#134]: https://github.com/huggingface/tokenizers/issues/134 [#131]: https://github.com/huggingface/tokenizers/issues/131 [#99]: https://github.com/huggingface/tokenizers/pull/99
tokenizers/bindings/python/CHANGELOG.md/0
{ "file_path": "tokenizers/bindings/python/CHANGELOG.md", "repo_id": "tokenizers", "token_count": 7408 }
218
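The changelog above name-drops several features without showing them side by side. The sketch below exercises a few of them together (training from memory, single-file serialization, `pad_to_multiple_of`, and pre-tokenized input), assuming a recent `tokenizers` release; the file name and sizes are made up, and this is a plausibility sketch rather than an excerpt from the project.

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

# Training from memory (#544): any iterator of strings works.
trainer = BpeTrainer(vocab_size=1000, special_tokens=["[UNK]", "[PAD]"])
tokenizer.train_from_iterator(["hello world", "hello tokenizers"], trainer)

# Full single-file serialization (#272): sharing a tokenizer is one file.
tokenizer.save("tokenizer.json")
reloaded = Tokenizer.from_file("tokenizer.json")

# Padding to a multiple of 8 (#289) and pre-tokenized input (#249).
reloaded.enable_padding(
    pad_token="[PAD]",
    pad_id=reloaded.token_to_id("[PAD]"),
    pad_to_multiple_of=8,
)
encoding = reloaded.encode(["hello", "world"], is_pretokenized=True)
print(encoding.tokens, len(encoding.ids) % 8 == 0)
```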
from .base_tokenizer import BaseTokenizer from .bert_wordpiece import BertWordPieceTokenizer from .byte_level_bpe import ByteLevelBPETokenizer from .char_level_bpe import CharBPETokenizer from .sentencepiece_bpe import SentencePieceBPETokenizer from .sentencepiece_unigram import SentencePieceUnigramTokenizer
tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py", "repo_id": "tokenizers", "token_count": 94 }
219
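The module above only re-exports the high-level tokenizer implementations. As a quick, hedged illustration of how one of them is driven (the toy corpus and the vocabulary size below are invented):

```python
from tokenizers.implementations import ByteLevelBPETokenizer

tokenizer = ByteLevelBPETokenizer()
tokenizer.train_from_iterator(
    ["Deep learning is a new type of machine learning."],  # toy corpus
    vocab_size=500,
    min_frequency=1,
)
encoding = tokenizer.encode("Deep learning")
print(encoding.tokens)
print(encoding.ids)
```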
.tokenized-text { width:100%; padding:2rem; max-height: 400px; overflow-y: auto; box-sizing:border-box; line-height:4rem; /* Lots of space between lines */ font-family: "Roboto Light", "Ubuntu Light", "Ubuntu", monospace; box-shadow: 2px 2px 2px rgba(0,0,0,0.2); background-color: rgba(0,0,0,0.01); letter-spacing:2px; /* Give some extra separation between chars */ } .non-token{ /* White space and other things the tokenizer ignores*/ white-space: pre; letter-spacing:4px; border-top:1px solid #A0A0A0; /* A gentle border on top and bottom makes tabs more obvious*/ border-bottom:1px solid #A0A0A0; line-height: 1rem; height: calc(100% - 2px); } .token { white-space: pre; position:relative; color:black; letter-spacing:2px; } .annotation{ white-space:nowrap; /* Important - ensures that annotations appear even if the annotated text wraps a line */ border-radius:4px; position:relative; width:fit-content; } .annotation:before { /*The before holds the text and the after holds the background*/ z-index:1000; /* Make sure this is above the background */ content:attr(data-label); /* The annotations label is on a data attribute */ color:white; position:absolute; font-size:1rem; text-align:center; font-weight:bold; top:1.75rem; line-height:0; left:0; width:100%; padding:0.5rem 0; /* These make it so an annotation doesn't stretch beyond the annotated text if the label is longer*/ overflow: hidden; white-space: nowrap; text-overflow:ellipsis; } .annotation:after { content:attr(data-label); /* The content defines the width of the annotation*/ position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:100%; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; /* Nasty hack below: We set the annotations color in code because we don't know the colors at css time. But you can't pass a color as a data attribute to get it into the pseudo element (this thing) So to get around that, annotations have the color set on them with a style attribute and then we can get the color with currentColor.
Annotations wrap tokens and tokens set the color back to black */ background-color: currentColor; } .annotation:hover::after, .annotation:hover::before{ /* When the user hovers over an annotation expand the label to display in full */ min-width: fit-content; } .annotation:hover{ /* Emphasize the annotation start end with a border on hover*/ border-color: currentColor; border: 2px solid; } .special-token:not(:empty){ /* A none empty special token is like UNK (as opposed to CLS which has no representation in the text ) */ position:relative; } .special-token:empty::before{ /* Special tokens that don't have text are displayed as pseudo elements so we dont select them with the mouse*/ content:attr(data-stok); background:#202020; font-size:0.75rem; color:white; margin: 0 0.25rem; padding: 0.25rem; border-radius:4px } .special-token:not(:empty):before { /* Special tokens that have text (UNK) are displayed above the actual text*/ content:attr(data-stok); position:absolute; bottom:1.75rem; min-width:100%; width:100%; height:1rem; line-height:1rem; font-size:1rem; text-align:center; color:white; font-weight:bold; background:#202020; border-radius:10%; } /* We want to alternate the color of tokens, but we can't use nth child because tokens might be broken up by annotations instead we apply even and odd class at generation time and color them that way */ .even-token{ background:#DCDCDC ; border: 1px solid #DCDCDC; } .odd-token{ background:#A0A0A0; border: 1px solid #A0A0A0; } .even-token.multi-token,.odd-token.multi-token{ background: repeating-linear-gradient( 45deg, transparent, transparent 1px, #ccc 1px, #ccc 1px ), /* on "bottom" */ linear-gradient( to bottom, #FFB6C1, #999 ); } .multi-token:hover::after { content:"This char has more than 1 token"; /* The content defines the width of the annotation*/ color:white; background-color: black; position:absolute; font-size:0.75rem; text-align:center; font-weight:bold; text-overflow:ellipsis; top:1.75rem; line-height:0; overflow: hidden; white-space: nowrap; left:0; width:fit-content; /* 100% of the parent, which is the annotation whose width is the tokens inside it*/ padding:0.5rem 0; }
tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/tools/visualizer-styles.css", "repo_id": "tokenizers", "token_count": 1806 }
220
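The CSS classes above (`.token`, `.annotation`, `.special-token`, ...) are injected by the `EncodingVisualizer` notebook tool that ships alongside this stylesheet. A hedged usage sketch follows; the model name is a placeholder, and the exact behaviour of `default_to_notebook=False` (returning the rendered HTML instead of displaying it) is assumed rather than verified.

```python
from tokenizers import Tokenizer
from tokenizers.tools import Annotation, EncodingVisualizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")  # any pretrained tokenizer works
visualizer = EncodingVisualizer(tokenizer, default_to_notebook=False)

text = "My name is John"
annotations = [Annotation(start=11, end=15, label="person")]  # highlights "John"
html = visualizer(text, annotations=annotations)  # HTML styled by the classes defined above
```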
use std::sync::{Arc, RwLock}; use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use serde::ser::SerializeStruct; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use tk::normalizer::SplitDelimiterBehavior; use tk::pre_tokenizers::bert::BertPreTokenizer; use tk::pre_tokenizers::byte_level::ByteLevel; use tk::pre_tokenizers::delimiter::CharDelimiterSplit; use tk::pre_tokenizers::digits::Digits; use tk::pre_tokenizers::metaspace::{Metaspace, PrependScheme}; use tk::pre_tokenizers::punctuation::Punctuation; use tk::pre_tokenizers::split::Split; use tk::pre_tokenizers::unicode_scripts::UnicodeScripts; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use tk::tokenizer::Offsets; use tk::{PreTokenizedString, PreTokenizer}; use tokenizers as tk; use super::error::ToPyResult; use super::utils::*; /// Base class for all pre-tokenizers /// /// This class is not supposed to be instantiated directly. Instead, any implementation of a /// PreTokenizer will return an instance of this class when instantiated. #[pyclass( dict, module = "tokenizers.pre_tokenizers", name = "PreTokenizer", subclass )] #[derive(Clone, Serialize, Deserialize)] pub struct PyPreTokenizer { #[serde(flatten)] pub(crate) pretok: PyPreTokenizerTypeWrapper, } impl PyPreTokenizer { #[allow(dead_code)] pub(crate) fn new(pretok: PyPreTokenizerTypeWrapper) -> Self { PyPreTokenizer { pretok } } pub(crate) fn get_as_subtype(&self, py: Python<'_>) -> PyResult<PyObject> { let base = self.clone(); Ok(match &self.pretok { PyPreTokenizerTypeWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PyPreTokenizerTypeWrapper::Single(ref inner) => { match &*inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Custom(_) => Py::new(py, base)?.into_py(py), PyPreTokenizerWrapper::Wrapped(inner) => match inner { PreTokenizerWrapper::Whitespace(_) => { Py::new(py, (PyWhitespace {}, base))?.into_py(py) } PreTokenizerWrapper::Split(_) => { Py::new(py, (PySplit {}, base))?.into_py(py) } PreTokenizerWrapper::Punctuation(_) => { Py::new(py, (PyPunctuation {}, base))?.into_py(py) } PreTokenizerWrapper::Sequence(_) => { Py::new(py, (PySequence {}, base))?.into_py(py) } PreTokenizerWrapper::Metaspace(_) => { Py::new(py, (PyMetaspace {}, base))?.into_py(py) } PreTokenizerWrapper::Delimiter(_) => { Py::new(py, (PyCharDelimiterSplit {}, base))?.into_py(py) } PreTokenizerWrapper::WhitespaceSplit(_) => { Py::new(py, (PyWhitespaceSplit {}, base))?.into_py(py) } PreTokenizerWrapper::ByteLevel(_) => { Py::new(py, (PyByteLevel {}, base))?.into_py(py) } PreTokenizerWrapper::BertPreTokenizer(_) => { Py::new(py, (PyBertPreTokenizer {}, base))?.into_py(py) } PreTokenizerWrapper::Digits(_) => { Py::new(py, (PyDigits {}, base))?.into_py(py) } PreTokenizerWrapper::UnicodeScripts(_) => { Py::new(py, (PyUnicodeScripts {}, base))?.into_py(py) } }, } } }) } } impl PreTokenizer for PyPreTokenizer { fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> tk::Result<()> { self.pretok.pre_tokenize(normalized) } } #[pymethods] impl PyPreTokenizer { #[staticmethod] fn custom(pretok: PyObject) -> Self { PyPreTokenizer { pretok: PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(pretok)).into(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.pretok).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle PreTokenizer: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) 
} fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { let unpickled = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle PreTokenizer: {}", e )) })?; self.pretok = unpickled; Ok(()) } Err(e) => Err(e), } } /// Pre-tokenize a :class:`~tokenizers.PyPreTokenizedString` in-place /// /// This method allows to modify a :class:`~tokenizers.PreTokenizedString` to /// keep track of the pre-tokenization, and leverage the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you just want to see the result of /// the pre-tokenization of a raw string, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize_str` /// /// Args: /// pretok (:class:`~tokenizers.PreTokenizedString): /// The pre-tokenized string on which to apply this /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` #[pyo3(text_signature = "(self, pretok)")] fn pre_tokenize(&self, pretok: &mut PyPreTokenizedString) -> PyResult<()> { ToPyResult(self.pretok.pre_tokenize(&mut pretok.pretok)).into() } /// Pre tokenize the given string /// /// This method provides a way to visualize the effect of a /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` but it does not keep track of the /// alignment, nor does it provide all the capabilities of the /// :class:`~tokenizers.PreTokenizedString`. If you need some of these, you can use /// :meth:`~tokenizers.pre_tokenizers.PreTokenizer.pre_tokenize` /// /// Args: /// sequence (:obj:`str`): /// A string to pre-tokeize /// /// Returns: /// :obj:`List[Tuple[str, Offsets]]`: /// A list of tuple with the pre-tokenized parts and their offsets #[pyo3(text_signature = "(self, sequence)")] fn pre_tokenize_str(&self, s: &str) -> PyResult<Vec<(String, Offsets)>> { let mut pretokenized = tk::tokenizer::PreTokenizedString::from(s); ToPyResult(self.pretok.pre_tokenize(&mut pretokenized)).into_py()?; Ok(pretokenized .get_splits(tk::OffsetReferential::Original, tk::OffsetType::Char) .into_iter() .map(|(s, o, _)| (s.to_owned(), o)) .collect()) } } macro_rules! getter { ($self: ident, $variant: ident, $($name: tt)+) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref pretok)) = *single.read().unwrap() { pretok.$($name)+ } else { unreachable!() } } else { unreachable!() } }}; } macro_rules! setter { ($self: ident, $variant: ident, $name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name = $value; } } }}; ($self: ident, $variant: ident, @$name: ident, $value: expr) => {{ let super_ = $self.as_ref(); if let PyPreTokenizerTypeWrapper::Single(ref single) = super_.pretok { if let PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::$variant(ref mut pretok)) = *single.write().unwrap() { pretok.$name($value); } } }}; } /// ByteLevel PreTokenizer /// /// This pre-tokenizer takes care of replacing all bytes of the given string /// with a corresponding representation, as well as splitting into words. /// /// Args: /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
/// use_regex (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Set this to :obj:`False` to prevent this `pre_tokenizer` from using /// the GPT2 specific regexp for spliting on whitespace. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "ByteLevel")] pub struct PyByteLevel {} #[pymethods] impl PyByteLevel { #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, ByteLevel, add_prefix_space, add_prefix_space); } #[getter] fn get_use_regex(self_: PyRef<Self>) -> bool { getter!(self_, ByteLevel, use_regex) } #[setter] fn set_use_regex(self_: PyRef<Self>, use_regex: bool) { setter!(self_, ByteLevel, use_regex, use_regex); } #[new] #[pyo3(signature = (add_prefix_space = true, use_regex = true, **_kwargs), text_signature = "(self, add_prefix_space=True, use_regex=True)")] fn new( add_prefix_space: bool, use_regex: bool, _kwargs: Option<&PyDict>, ) -> (Self, PyPreTokenizer) { ( PyByteLevel {}, ByteLevel::default() .add_prefix_space(add_prefix_space) .use_regex(use_regex) .into(), ) } /// Returns the alphabet used by this PreTokenizer. /// /// Since the ByteLevel works as its name suggests, at the byte level, it /// encodes each byte value to a unique visible character. This means that there is a /// total of 256 different characters composing this alphabet. /// /// Returns: /// :obj:`List[str]`: A list of characters that compose the alphabet #[staticmethod] #[pyo3(text_signature = "()")] fn alphabet() -> Vec<String> { ByteLevel::alphabet() .into_iter() .map(|c| c.to_string()) .collect() } } /// This pre-tokenizer simply splits using the following regex: `\w+|[^\w\s]+` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Whitespace")] pub struct PyWhitespace {} #[pymethods] impl PyWhitespace { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespace {}, Whitespace {}.into()) } } /// This pre-tokenizer simply splits on the whitespace. Works like `.split()` #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "WhitespaceSplit")] pub struct PyWhitespaceSplit {} #[pymethods] impl PyWhitespaceSplit { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyWhitespaceSplit {}, WhitespaceSplit.into()) } } /// Split PreTokenizer /// /// This versatile pre-tokenizer splits using the provided pattern and /// according to the provided behavior. The pattern can be inverted by /// making use of the invert flag. /// /// Args: /// pattern (:obj:`str` or :class:`~tokenizers.Regex`): /// A pattern used to split the string. Usually a string or a a regex built with `tokenizers.Regex` /// /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. /// Choices: "removed", "isolated", "merged_with_previous", "merged_with_next", /// "contiguous" /// /// invert (:obj:`bool`, `optional`, defaults to :obj:`False`): /// Whether to invert the pattern. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Split")] pub struct PySplit {} #[pymethods] impl PySplit { #[new] #[pyo3(signature = (pattern, behavior, invert = false), text_signature = "(self, pattern, behavior, invert=False)")] fn new( pattern: PyPattern, behavior: PySplitDelimiterBehavior, invert: bool, ) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PySplit {}, ToPyResult(Split::new(pattern, behavior.into(), invert)) .into_py()? .into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [" ", "removed"]) } } /// This pre-tokenizer simply splits on the provided char. Works like `.split(delimiter)` /// /// Args: /// delimiter: str: /// The delimiter char that will be used to split input #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "CharDelimiterSplit")] pub struct PyCharDelimiterSplit {} #[pymethods] impl PyCharDelimiterSplit { #[getter] fn get_delimiter(self_: PyRef<Self>) -> String { getter!(self_, Delimiter, delimiter.to_string()) } #[setter] fn set_delimiter(self_: PyRef<Self>, delimiter: PyChar) { setter!(self_, Delimiter, delimiter, delimiter.0); } #[new] #[pyo3(text_signature = None)] pub fn new(delimiter: PyChar) -> PyResult<(Self, PyPreTokenizer)> { Ok(( PyCharDelimiterSplit {}, CharDelimiterSplit::new(delimiter.0).into(), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [" "]) } } /// BertPreTokenizer /// /// This pre-tokenizer splits tokens on spaces, and also on punctuation. /// Each occurence of a punctuation character will be treated separately. #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "BertPreTokenizer")] pub struct PyBertPreTokenizer {} #[pymethods] impl PyBertPreTokenizer { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyBertPreTokenizer {}, BertPreTokenizer.into()) } } /// This pre-tokenizer simply splits on punctuation as individual characters. /// /// Args: /// behavior (:class:`~tokenizers.SplitDelimiterBehavior`): /// The behavior to use when splitting. 
/// Choices: "removed", "isolated" (default), "merged_with_previous", "merged_with_next", /// "contiguous" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Punctuation")] pub struct PyPunctuation {} #[pymethods] impl PyPunctuation { #[new] #[pyo3( signature = (behavior = PySplitDelimiterBehavior(SplitDelimiterBehavior::Isolated)), text_signature = "(self, behavior=\"isolated\")")] fn new(behavior: PySplitDelimiterBehavior) -> (Self, PyPreTokenizer) { (PyPunctuation {}, Punctuation::new(behavior.into()).into()) } } /// This pre-tokenizer composes other pre_tokenizers and applies them in sequence #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Sequence")] pub struct PySequence {} #[pymethods] impl PySequence { #[new] #[pyo3(text_signature = "(self, pretokenizers)")] fn new(pre_tokenizers: &PyList) -> PyResult<(Self, PyPreTokenizer)> { let mut sequence = Vec::with_capacity(pre_tokenizers.len()); for n in pre_tokenizers.iter() { let pretokenizer: PyRef<PyPreTokenizer> = n.extract()?; match &pretokenizer.pretok { PyPreTokenizerTypeWrapper::Sequence(inner) => { sequence.extend(inner.iter().cloned()) } PyPreTokenizerTypeWrapper::Single(inner) => sequence.push(inner.clone()), } } Ok(( PySequence {}, PyPreTokenizer::new(PyPreTokenizerTypeWrapper::Sequence(sequence)), )) } fn __getnewargs__<'p>(&self, py: Python<'p>) -> &'p PyTuple { PyTuple::new(py, [PyList::empty(py)]) } } fn from_string(string: String) -> Result<PrependScheme, PyErr> { let scheme = match string.as_str() { "first" => PrependScheme::First, "never" => PrependScheme::Never, "always" => PrependScheme::Always, _ => { return Err(exceptions::PyValueError::new_err(format!( "{} is an unknown variant, should be one of ['first', 'never', 'always']", string ))); } }; Ok(scheme) } /// Metaspace pre-tokenizer /// /// This pre-tokenizer replaces any whitespace by the provided replacement character. /// It then tries to split on these spaces. /// /// Args: /// replacement (:obj:`str`, `optional`, defaults to :obj:`▁`): /// The replacement character. Must be exactly one character. By default we /// use the `▁` (U+2581) meta symbol (Same as in SentencePiece). /// /// add_prefix_space (:obj:`bool`, `optional`, defaults to :obj:`True`): /// Whether to add a space to the first word if there isn't already one. This /// lets us treat `hello` exactly like `say hello`. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Metaspace")] pub struct PyMetaspace {} #[pymethods] impl PyMetaspace { #[getter] fn get_replacement(self_: PyRef<Self>) -> String { getter!(self_, Metaspace, get_replacement().to_string()) } #[setter] fn set_replacement(self_: PyRef<Self>, replacement: PyChar) { setter!(self_, Metaspace, @set_replacement, replacement.0); } #[getter] fn get_add_prefix_space(self_: PyRef<Self>) -> bool { getter!(self_, Metaspace, add_prefix_space) } #[setter] fn set_add_prefix_space(self_: PyRef<Self>, add_prefix_space: bool) { setter!(self_, Metaspace, add_prefix_space, add_prefix_space); } #[getter] fn get_prepend_scheme(self_: PyRef<Self>) -> String { // Assuming Metaspace has a method to get the prepend_scheme as a string let scheme: PrependScheme = getter!(self_, Metaspace, get_prepend_scheme()); match scheme { PrependScheme::First => "first", PrependScheme::Never => "never", PrependScheme::Always => "always", } .to_string() } #[setter] fn set_prepend_scheme(self_: PyRef<Self>, prepend_scheme: String) -> PyResult<()> { let scheme = from_string(prepend_scheme)?; setter!(self_, Metaspace, @set_prepend_scheme, scheme); Ok(()) } #[new] #[pyo3(signature = (replacement = PyChar('▁'), add_prefix_space = true, prepend_scheme=None, **_kwargs), text_signature = "(self, replacement=\"_\", add_prefix_space=True)")] fn new( replacement: PyChar, add_prefix_space: bool, prepend_scheme: Option<String>, _kwargs: Option<&PyDict>, ) -> PyResult<(Self, PyPreTokenizer)> { // Create a new Metaspace instance let mut new_instance: Metaspace = Metaspace::new(replacement.0, add_prefix_space); // If a prepend scheme is provided, set it if let Some(prepend_scheme) = prepend_scheme { match from_string(prepend_scheme) { Ok(prepend_scheme_enum) => new_instance.set_prepend_scheme(prepend_scheme_enum), Err(err) => return Err(err), } } Ok((PyMetaspace {}, new_instance.into())) } } /// This pre-tokenizer simply splits using the digits in separate tokens /// /// Args: /// individual_digits (:obj:`bool`, `optional`, defaults to :obj:`False`): /// If set to True, digits will each be separated as follows:: /// /// "Call 123 please" -> "Call ", "1", "2", "3", " please" /// /// If set to False, digits will grouped as follows:: /// /// "Call 123 please" -> "Call ", "123", " please" #[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "Digits")] pub struct PyDigits {} #[pymethods] impl PyDigits { #[getter] fn get_individual_digits(self_: PyRef<Self>) -> bool { getter!(self_, Digits, individual_digits) } #[setter] fn set_individual_digits(self_: PyRef<Self>, individual_digits: bool) { setter!(self_, Digits, individual_digits, individual_digits); } #[new] #[pyo3(signature = (individual_digits = false), text_signature = "(self, individual_digits=False)")] fn new(individual_digits: bool) -> (Self, PyPreTokenizer) { (PyDigits {}, Digits::new(individual_digits).into()) } } /// This pre-tokenizer splits on characters that belong to different language family /// It roughly follows https://github.com/google/sentencepiece/blob/master/data/Scripts.txt /// Actually Hiragana and Katakana are fused with Han, and 0x30FC is Han too. /// This mimicks SentencePiece Unigram implementation. 
#[pyclass(extends=PyPreTokenizer, module = "tokenizers.pre_tokenizers", name = "UnicodeScripts")] pub struct PyUnicodeScripts {} #[pymethods] impl PyUnicodeScripts { #[new] #[pyo3(text_signature = "(self)")] fn new() -> (Self, PyPreTokenizer) { (PyUnicodeScripts {}, UnicodeScripts::new().into()) } } #[derive(Clone)] pub(crate) struct CustomPreTokenizer { inner: PyObject, } impl CustomPreTokenizer { pub fn new(inner: PyObject) -> Self { Self { inner } } } impl tk::tokenizer::PreTokenizer for CustomPreTokenizer { fn pre_tokenize(&self, sentence: &mut PreTokenizedString) -> tk::Result<()> { Python::with_gil(|py| { let pretok = PyPreTokenizedStringRefMut::new(sentence); let py_pretok = self.inner.as_ref(py); py_pretok.call_method("pre_tokenize", (pretok.get(),), None)?; Ok(()) }) } } impl Serialize for CustomPreTokenizer { fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { Err(serde::ser::Error::custom( "Custom PreTokenizer cannot be serialized", )) } } impl<'de> Deserialize<'de> for CustomPreTokenizer { fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { Err(serde::de::Error::custom( "Custom PreTokenizer cannot be deserialized", )) } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerWrapper { Custom(CustomPreTokenizer), Wrapped(PreTokenizerWrapper), } impl Serialize for PyPreTokenizerWrapper { fn serialize<S>(&self, serializer: S) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error> where S: Serializer, { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.serialize(serializer), PyPreTokenizerWrapper::Custom(inner) => inner.serialize(serializer), } } } #[derive(Clone, Deserialize)] #[serde(untagged)] pub(crate) enum PyPreTokenizerTypeWrapper { Sequence(Vec<Arc<RwLock<PyPreTokenizerWrapper>>>), Single(Arc<RwLock<PyPreTokenizerWrapper>>), } impl Serialize for PyPreTokenizerTypeWrapper { fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: Serializer, { match self { PyPreTokenizerTypeWrapper::Sequence(seq) => { let mut ser = serializer.serialize_struct("Sequence", 2)?; ser.serialize_field("type", "Sequence")?; ser.serialize_field("pretokenizers", seq)?; ser.end() } PyPreTokenizerTypeWrapper::Single(inner) => inner.serialize(serializer), } } } impl<I> From<I> for PyPreTokenizerWrapper where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerWrapper::Wrapped(pretok.into()) } } impl<I> From<I> for PyPreTokenizerTypeWrapper where I: Into<PyPreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizerTypeWrapper::Single(Arc::new(RwLock::new(pretok.into()))) } } impl<I> From<I> for PyPreTokenizer where I: Into<PreTokenizerWrapper>, { fn from(pretok: I) -> Self { PyPreTokenizer { pretok: pretok.into().into(), } } } impl PreTokenizer for PyPreTokenizerTypeWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerTypeWrapper::Single(inner) => inner.read().unwrap().pre_tokenize(pretok), PyPreTokenizerTypeWrapper::Sequence(inner) => inner .iter() .try_for_each(|n| n.read().unwrap().pre_tokenize(pretok)), } } } impl PreTokenizer for PyPreTokenizerWrapper { fn pre_tokenize(&self, pretok: &mut PreTokenizedString) -> tk::Result<()> { match self { PyPreTokenizerWrapper::Wrapped(inner) => inner.pre_tokenize(pretok), PyPreTokenizerWrapper::Custom(inner) => inner.pre_tokenize(pretok), } } } /// PreTokenizers Module #[pymodule] pub fn pre_tokenizers(_py: Python, m: &PyModule) -> 
PyResult<()> { m.add_class::<PyPreTokenizer>()?; m.add_class::<PyByteLevel>()?; m.add_class::<PyWhitespace>()?; m.add_class::<PyWhitespaceSplit>()?; m.add_class::<PySplit>()?; m.add_class::<PyBertPreTokenizer>()?; m.add_class::<PyMetaspace>()?; m.add_class::<PyCharDelimiterSplit>()?; m.add_class::<PyPunctuation>()?; m.add_class::<PySequence>()?; m.add_class::<PyDigits>()?; m.add_class::<PyUnicodeScripts>()?; Ok(()) } #[cfg(test)] mod test { use pyo3::prelude::*; use tk::pre_tokenizers::sequence::Sequence; use tk::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit}; use tk::pre_tokenizers::PreTokenizerWrapper; use crate::pre_tokenizers::{ CustomPreTokenizer, PyPreTokenizer, PyPreTokenizerTypeWrapper, PyPreTokenizerWrapper, }; #[test] fn get_subtype() { Python::with_gil(|py| { let py_norm = PyPreTokenizer::new(Whitespace {}.into()); let py_wsp = py_norm.get_as_subtype(py).unwrap(); assert_eq!("Whitespace", py_wsp.as_ref(py).get_type().name().unwrap()); }) } #[test] fn serialize() { let py_wrapped: PyPreTokenizerWrapper = Whitespace {}.into(); let py_ser = serde_json::to_string(&py_wrapped).unwrap(); let rs_wrapped = PreTokenizerWrapper::Whitespace(Whitespace {}); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_ser, rs_ser); let py_pretok: PyPreTokenizer = serde_json::from_str(&rs_ser).unwrap(); match py_pretok.pretok { PyPreTokenizerTypeWrapper::Single(inner) => match *inner.as_ref().read().unwrap() { PyPreTokenizerWrapper::Wrapped(PreTokenizerWrapper::Whitespace(_)) => {} _ => panic!("Expected Whitespace"), }, _ => panic!("Expected wrapped, not custom."), } let py_seq: PyPreTokenizerWrapper = Sequence::new(vec![Whitespace {}.into(), WhitespaceSplit.into()]).into(); let py_wrapper_ser = serde_json::to_string(&py_seq).unwrap(); let rs_wrapped = PreTokenizerWrapper::Sequence(Sequence::new(vec![ Whitespace {}.into(), WhitespaceSplit.into(), ])); let rs_ser = serde_json::to_string(&rs_wrapped).unwrap(); assert_eq!(py_wrapper_ser, rs_ser); let py_seq = PyPreTokenizer::new(py_seq.into()); let py_ser = serde_json::to_string(&py_seq).unwrap(); assert_eq!(py_wrapper_ser, py_ser); let obj = Python::with_gil(|py| { let py_wsp = PyPreTokenizer::new(Whitespace {}.into()); let obj: PyObject = Py::new(py, py_wsp).unwrap().into_py(py); obj }); let py_seq: PyPreTokenizerWrapper = PyPreTokenizerWrapper::Custom(CustomPreTokenizer::new(obj)); assert!(serde_json::to_string(&py_seq).is_err()); } }
tokenizers/bindings/python/src/pre_tokenizers.rs/0
{ "file_path": "tokenizers/bindings/python/src/pre_tokenizers.rs", "repo_id": "tokenizers", "token_count": 13027 }
221
import pickle import pytest from tokenizers.models import BPE, Model, WordLevel, WordPiece from ..utils import bert_files, data_dir, roberta_files class TestBPE: def test_instantiate(self, roberta_files): assert isinstance(BPE(), Model) assert isinstance(BPE(), BPE) vocab = {"a": 0, "b": 1, "ab": 2} merges = [("a", "b")] assert isinstance(BPE(vocab, merges), Model) assert isinstance(BPE.from_file(roberta_files["vocab"], roberta_files["merges"]), BPE) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=vocab) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=merges) assert isinstance( pickle.loads(pickle.dumps(BPE(vocab, merges))), BPE, ) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(BPE(roberta_files["vocab"], roberta_files["merges"]), Model) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(vocab=roberta_files["vocab"]) with pytest.raises(ValueError, match="`vocab` and `merges` must be both specified"): BPE(merges=roberta_files["merges"]) with pytest.deprecated_call(): assert isinstance( pickle.loads(pickle.dumps(BPE(roberta_files["vocab"], roberta_files["merges"]))), BPE, ) def test_can_modify(self): model = BPE( dropout=0.5, unk_token="[UNK]", continuing_subword_prefix="__prefix__", end_of_word_suffix="__suffix__", fuse_unk=False, ) assert model.dropout == 0.5 assert model.unk_token == "[UNK]" assert model.continuing_subword_prefix == "__prefix__" assert model.end_of_word_suffix == "__suffix__" assert model.fuse_unk == False assert model.byte_fallback == False # Modify these model.dropout = 0.1 assert pytest.approx(model.dropout) == 0.1 model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = None assert model.continuing_subword_prefix == None model.end_of_word_suffix = "suff" assert model.end_of_word_suffix == "suff" model.fuse_unk = True assert model.fuse_unk == True model.byte_fallback = True assert model.byte_fallback == True class TestWordPiece: def test_instantiate(self, bert_files): assert isinstance(WordPiece(), Model) assert isinstance(WordPiece(), WordPiece) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordPiece(vocab), Model) assert isinstance(WordPiece(vocab), WordPiece) assert isinstance(WordPiece.from_file(bert_files["vocab"]), WordPiece) assert isinstance(pickle.loads(pickle.dumps(WordPiece(vocab))), WordPiece) # Deprecated calls in 0.9 with pytest.deprecated_call(): assert isinstance(WordPiece(bert_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(pickle.loads(pickle.dumps(WordPiece(bert_files["vocab"]))), WordPiece) def test_can_modify(self): model = WordPiece( unk_token="<oov>", continuing_subword_prefix="__prefix__", max_input_chars_per_word=200, ) assert model.unk_token == "<oov>" assert model.continuing_subword_prefix == "__prefix__" assert model.max_input_chars_per_word == 200 # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>" model.continuing_subword_prefix = "$$$" assert model.continuing_subword_prefix == "$$$" model.max_input_chars_per_word = 10 assert model.max_input_chars_per_word == 10 class TestWordLevel: def test_instantiate(self, roberta_files): assert isinstance(WordLevel(), Model) assert isinstance(WordLevel(), WordLevel) vocab = {"a": 0, "b": 1, "ab": 2} assert isinstance(WordLevel(vocab), Model) assert isinstance(WordLevel(vocab), WordLevel) assert isinstance(WordLevel.from_file(roberta_files["vocab"]), 
WordLevel) # The WordLevel model expects a vocab.json using the same format as roberta # so we can just try to load with this file with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), Model) with pytest.deprecated_call(): assert isinstance(WordLevel(roberta_files["vocab"]), WordLevel) def test_can_modify(self): model = WordLevel(unk_token="<oov>") assert model.unk_token == "<oov>" # Modify these model.unk_token = "<unk>" assert model.unk_token == "<unk>"
tokenizers/bindings/python/tests/bindings/test_models.py/0
{ "file_path": "tokenizers/bindings/python/tests/bindings/test_models.py", "repo_id": "tokenizers", "token_count": 2253 }
222
import json import os import unittest import tqdm from huggingface_hub import HfApi, cached_download, hf_hub_url from tokenizers import Tokenizer from .utils import albert_base, data_dir class TestSerialization: def test_full_serialization_albert(self, albert_base): # Check we can read this file. # This used to fail because of BufReader that would fail because the # file exceeds the buffer capacity Tokenizer.from_file(albert_base) def check(tokenizer_file) -> bool: with open(tokenizer_file, "r") as f: data = json.load(f) if "pre_tokenizer" not in data: return True if "type" not in data["pre_tokenizer"]: return False if data["pre_tokenizer"]["type"] == "Sequence": for pre_tok in data["pre_tokenizer"]["pretokenizers"]: if "type" not in pre_tok: return False return True def slow(test_case): """ Decorator marking a test as slow. Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. """ if os.getenv("RUN_SLOW") != "1": return unittest.skip("use `RUN_SLOW=1` to run")(test_case) else: return test_case @slow class TestFullDeserialization(unittest.TestCase): def test_full_deserialization_hub(self): # Check we can read this file. # This used to fail because of BufReader that would fail because the # file exceeds the buffer capacity not_loadable = [] invalid_pre_tokenizer = [] # models = api.list_models(filter="transformers") # for model in tqdm.tqdm(models): # model_id = model.modelId # for model_file in model.siblings: # filename = model_file.rfilename # if filename == "tokenizer.json": # all_models.append((model_id, filename)) all_models = [("HueyNemud/das22-10-camembert_pretrained", "tokenizer.json")] for model_id, filename in tqdm.tqdm(all_models): tokenizer_file = cached_download(hf_hub_url(model_id, filename=filename)) is_ok = check(tokenizer_file) if not is_ok: print(f"{model_id} is affected by no type") invalid_pre_tokenizer.append(model_id) try: Tokenizer.from_file(tokenizer_file) except Exception as e: print(f"{model_id} is not loadable: {e}") not_loadable.append(model_id) except: # noqa: E722 print(f"{model_id} is not loadable: Rust error") not_loadable.append(model_id) self.assertEqual(invalid_pre_tokenizer, []) self.assertEqual(not_loadable, [])
tokenizers/bindings/python/tests/test_serialization.py/0
{ "file_path": "tokenizers/bindings/python/tests/test_serialization.py", "repo_id": "tokenizers", "token_count": 1240 }
223
# Visualizer <tokenizerslangcontent> <python> ## Annotation [[autodoc]] tokenizers.tools.Annotation ## EncodingVisualizer [[autodoc]] tokenizers.tools.EncodingVisualizer - __call__ </python> <rust> The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website. </rust> <node> The node API has not been documented yet. </node> </tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/visualizer.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/visualizer.mdx", "repo_id": "tokenizers", "token_count": 134 }
224
# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). ## [0.13.2] - Python only changes ## [0.13.1] - [#1072] Fixing Roberta type ids. ## [0.13.0] - [#1009] `unstable_wasm` feature to support building on Wasm (it's unstable !) - [#1008] `Decoder` is now a composable trait, but without being backward incompatible - [#1047, #1051, #1052] `Processor` is now a composable trait, but without being backward incompatible Both trait changes warrant a "major" number since, despite best efforts to not break backward compatibility, the code is different enough that we cannot be exactly sure. ## [0.12.1] - [#938] **Reverted breaking change**. https://github.com/huggingface/transformers/issues/16520 ## [0.12.0] YANKED Bump minor version because of a breaking change. - [#938] [REVERTED IN 0.12.1] **Breaking change**. Decoder trait is modified to be composable. This is only breaking if you are using decoders on their own. tokenizers should be error free. - [#939] Making the regex in `ByteLevel` pre_tokenizer optional (necessary for BigScience) - [#952] Fixed the vocabulary size of UnigramTrainer output (to respect added tokens) - [#954] Fixed not being able to save vocabularies with holes in vocab (ConvBert). Yell warnings instead, but stop panicking. - [#961] Added link for Ruby port of `tokenizers` - [#960] Feature gate for `cli` and its `clap` dependency ## [0.11.3] - [#919] Fixing single_word AddedToken. (regression from 0.11.2) - [#916] Deserializing faster `added_tokens` by loading them in batch. ## [0.11.2] - [#884] Fixing bad deserialization following inclusion of a default for Punctuation ## [0.11.1] - [#882] Fixing Punctuation deserialize without argument. - [#868] Fixing missing direction in TruncationParams - [#860] Adding TruncationSide to TruncationParams ## [0.11.0] ### Fixed - [#236]: Fix a bug with offsets being shifted when there are sub-sequences (Usually with special tokens and/or added tokens in the sequence). - [#286]: Fix various crash when training a BPE model - [#309]: Fixed a few bugs related to additional vocabulary/tokens - [#363]: Fix panic from unwrapping `File::open` in `count_words` ### Changed - [#234]: Completely changed the alignement mappings available on `Encoding`. Previous mappings were misleading and only providing offsets. New ones provide methods to easily convert between `char` or `word` (input space) and `token` (output space) - [#236]: `AddedToken` with special options like `rstrip` will keep the matched whitespaces in the textual representation of the token, exposed in `tokens` on the `Encoding`. The ID stays the same as usual. This fixes the offsets for said tokens. - [#236]: Offsets are now converted back to the original referential before we merge the sub-sequences together and then do the post-processing. This also fixes some offsets bugs. - [#236]: ByteLevel PostProcessor now uses the `add_prefix_space` attribute to determine how to trim offsets. - Improved `TruncationError` to handle cases where provided max length is too low. - [#249]: `encode` and `encode_batch` input has been greatly improved, and it now also accept pre-tokenized inputs. - Improved `TruncationError` to handle cases where provided max length is too low. 
- [#276]: Improve BPE training speeds, by reading files sequentially, but parallelizing the processing of each file - [#280]: Use `onig` for byte-level pre-tokenization to remove all the differences with the original implementation from GPT-2 - [#309]: Improved the management of the additional vocabulary. This introduces an option `normalized`, controlling whether a token should be extracted from the normalized version of the input text. - [#330]: BertNormalizer now keeps the same behavior than the original implementation when `strip_accents` is not specified. - [#355]: Tokenizer does not use any dynamic dispatch anymore. - [#377]: Use byte offsets everywhere (instead of the char offsets) ### Added - [#236]: RobertaProcessing is now also taking care of trimming offsets, and works just as ByteLevel on this front. - [#272]: Serialization of the `Tokenizer` and all the parts (`PreTokenizer`, `Normalizer`, ...) using serde. It is now easy to save/load an entire tokenizer. - [#289]: Ability to pad to a multiple of a specified value. This is especially useful to ensure activation of the Tensor Cores, while ensuring padding to a multiple of 8. - [#298]: Ability to get the currently set truncation/padding params - [#311]: Ability to enable/disable the parallelism using the `TOKENIZERS_PARALLELISM` environment variable. - [#403]: Add `TemplateProcessing` `PostProcessor`. ### How to migrate - Replace any `XXX_to_YYY_offsets()` method call by any of the new ones. - Specify the `add_prefix_space` and `trim_offsets` options on `RobertaProcessing` if you don't want the offsets trimmed out. - Any custom `PostProcessor` now handles offsets relative to the original string (as opposed to the normalized one). ## [0.10.1] ### Fixed - [#226]: Fix the word indexes when there are special tokens ## [0.10.0] ### Changed - [#222]: All Tokenizer's subparts must now be `Send + Sync` ### Added - [#208]: Ability to retrieve the vocabulary from the `Tokenizer` & `Model` ### Fixed - [#205]: Trim the decoded string in `BPEDecoder` - [b770f36]: Fix a bug with added tokens generated IDs ## [0.9.0] ### Changed - Only one progress bar while reading files during training. This is better for use-cases with a high number of files as it avoids having too many progress bars on screen. Also avoids reading the size of each file before starting to actually read these files, as this process could take really long. - [#190]: Improved BPE and WordPiece builders - [#193]: `encode` and `encode_batch` now take a new argument, specifying whether we should add the special tokens - [#197]: The `NormalizedString` has been removed from the `Encoding`. It is now possible to retrieve it by calling `normalize` on the `Tokenizer`. This brings a reduction of 70% of the memory footprint - [#197]: The `NormalizedString` API has been improved. It is now possible to retrieve parts of both strings using both "normalized" or "original" offsets - [#197]: The offsets provided on `Encoding` are now relative to the original string, and not the normalized one anymore - `AddedToken` are now used for both `add_special_tokens` and `add_tokens`. Also, these AddedToken have more options to allow various behaviors. ### Added - [#188]: `impl PostProcessor for ByteLevel`: Handles trimming the offsets if activated. This avoids the unintuitive inclusion of the whitespaces in the produced offsets, even if these whitespaces are part of the actual token - More alignment mappings on the `Encoding`. 
- `post_process` can be called on the `Tokenizer` ### Fixed - [#193]: Fix some issues with the offsets being wrong with the `ByteLevel` BPE: - when `add_prefix_space` is activated - [#156]: when a Unicode character gets split-up in multiple byte-level characters - Fix a bug where offsets were wrong when there was any added tokens in the sequence being encoded. - [#175]: Fix a bug that prevented the addition of more than a certain amount of tokens (even if not advised, but that's not the question) ### How to migrate - Add the `ByteLevel` `PostProcessor` to your byte-level BPE tokenizers if relevant. ## [0.8.0] ### Changed - [#165]: Big improvements in speed for BPE (Both training and tokenization) ### Fixed - [#163]: Do not open all files directly while training - [#156]: There was a bug in ByteLevel PreTokenizer that caused offsets to be wrong if a char got split up in multiple bytes - [#174]: The `LongestFirst` truncation strategy had a bug [#1072]: https://github.com/huggingface/tokenizers/pull/1072 [#956]: https://github.com/huggingface/tokenizers/pull/956 [#1008]: https://github.com/huggingface/tokenizers/pull/1008 [#1009]: https://github.com/huggingface/tokenizers/pull/1009 [#1047]: https://github.com/huggingface/tokenizers/pull/1047 [#1055]: https://github.com/huggingface/tokenizers/pull/1055 [#1051]: https://github.com/huggingface/tokenizers/pull/1051 [#1052]: https://github.com/huggingface/tokenizers/pull/1052 [#938]: https://github.com/huggingface/tokenizers/pull/938 [#939]: https://github.com/huggingface/tokenizers/pull/939 [#952]: https://github.com/huggingface/tokenizers/pull/952 [#954]: https://github.com/huggingface/tokenizers/pull/954 [#961]: https://github.com/huggingface/tokenizers/pull/961 [#960]: https://github.com/huggingface/tokenizers/pull/960 [#919]: https://github.com/huggingface/tokenizers/pull/919 [#916]: https://github.com/huggingface/tokenizers/pull/916 [#884]: https://github.com/huggingface/tokenizers/pull/884 [#882]: https://github.com/huggingface/tokenizers/pull/882 [#868]: https://github.com/huggingface/tokenizers/pull/868 [#860]: https://github.com/huggingface/tokenizers/pull/860 [#403]: https://github.com/huggingface/tokenizers/pull/403 [#377]: https://github.com/huggingface/tokenizers/pull/377 [#355]: https://github.com/huggingface/tokenizers/pull/355 [#363]: https://github.com/huggingface/tokenizers/pull/363 [#330]: https://github.com/huggingface/tokenizers/pull/330 [#311]: https://github.com/huggingface/tokenizers/pull/311 [#309]: https://github.com/huggingface/tokenizers/pull/309 [#298]: https://github.com/huggingface/tokenizers/pull/298 [#289]: https://github.com/huggingface/tokenizers/pull/289 [#286]: https://github.com/huggingface/tokenizers/pull/286 [#280]: https://github.com/huggingface/tokenizers/pull/280 [#276]: https://github.com/huggingface/tokenizers/pull/276 [#272]: https://github.com/huggingface/tokenizers/pull/272 [#249]: https://github.com/huggingface/tokenizers/pull/249 [b770f36]: https://github.com/huggingface/tokenizers/commit/b770f364280af33efeffea8f0003102cda8cf1b7 [#236]: https://github.com/huggingface/tokenizers/pull/236 [#234]: https://github.com/huggingface/tokenizers/pull/234 [#226]: https://github.com/huggingface/tokenizers/pull/226 [#222]: https://github.com/huggingface/tokenizers/pull/222 [#208]: https://github.com/huggingface/tokenizers/pull/208 [#205]: https://github.com/huggingface/tokenizers/issues/205 [#197]: https://github.com/huggingface/tokenizers/pull/197 [#193]: https://github.com/huggingface/tokenizers/pull/193 
[#190]: https://github.com/huggingface/tokenizers/pull/190 [#188]: https://github.com/huggingface/tokenizers/pull/188 [#175]: https://github.com/huggingface/tokenizers/issues/175 [#174]: https://github.com/huggingface/tokenizers/issues/174 [#165]: https://github.com/huggingface/tokenizers/pull/165 [#163]: https://github.com/huggingface/tokenizers/issues/163 [#156]: https://github.com/huggingface/tokenizers/pull/156
tokenizers/tokenizers/CHANGELOG.md/0
{ "file_path": "tokenizers/tokenizers/CHANGELOG.md", "repo_id": "tokenizers", "token_count": 3388 }
225
pub fn set_panic_hook() { // When the `console_error_panic_hook` feature is enabled, we can call the // `set_panic_hook` function at least once during initialization, and then // we will get better error messages if our code ever panics. // // For more details see // https://github.com/rustwasm/console_error_panic_hook#readme #[cfg(feature = "console_error_panic_hook")] console_error_panic_hook::set_once(); }
tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs/0
{ "file_path": "tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs", "repo_id": "tokenizers", "token_count": 150 }
226
use crate::tokenizer::{Decoder, Result}; use monostate::MustBe; use serde::{Deserialize, Serialize}; #[derive(Deserialize, Clone, Debug, Serialize, Default)] /// ByteFallback is a simple trick which converts tokens looking like `<0x61>` /// to pure bytes, and attempts to make them into a string. If the tokens /// cannot be decoded you will get � instead for each inconvertable byte token #[non_exhaustive] pub struct ByteFallback { #[serde(rename = "type")] type_: MustBe!("ByteFallback"), } impl ByteFallback { pub fn new() -> Self { Self { type_: MustBe!("ByteFallback"), } } } impl Decoder for ByteFallback { fn decode_chain(&self, tokens: Vec<String>) -> Result<Vec<String>> { let mut new_tokens: Vec<String> = vec![]; let mut previous_byte_tokens: Vec<u8> = vec![]; for token in tokens { let bytes = if token.len() == 6 && token.starts_with("<0x") && token.ends_with('>') { if let Ok(byte) = u8::from_str_radix(&token[3..5], 16) { Some(byte) } else { None } } else { None }; if let Some(bytes) = bytes { previous_byte_tokens.push(bytes); } else { if !previous_byte_tokens.is_empty() { if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) { new_tokens.push(string); } else { for _ in 0..previous_byte_tokens.len() { new_tokens.push("�".into()); } } previous_byte_tokens.clear(); } new_tokens.push(token); } } if !previous_byte_tokens.is_empty() { if let Ok(string) = String::from_utf8(previous_byte_tokens.clone()) { new_tokens.push(string); } else { for _ in 0..previous_byte_tokens.len() { new_tokens.push("�".into()); } } } Ok(new_tokens) } } #[cfg(test)] mod tests { use super::*; #[test] fn decode() { let decoder = ByteFallback::new(); let res = decoder .decode_chain(vec!["Hey".into(), "friend!".into()]) .unwrap(); assert_eq!(res, vec!["Hey", "friend!"]); let res = decoder.decode_chain(vec!["<0x61>".into()]).unwrap(); assert_eq!(res, vec!["a"]); let res = decoder.decode_chain(vec!["<0xE5>".into()]).unwrap(); assert_eq!(res, vec!["�"]); let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into()]) .unwrap(); assert_eq!(res, vec!["�", "�"]); // 叫 let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "<0xab>".into()]) .unwrap(); assert_eq!(res, vec!["叫"]); let res = decoder .decode_chain(vec![ "<0xE5>".into(), "<0x8f>".into(), "<0xab>".into(), "a".into(), ]) .unwrap(); assert_eq!(res, vec!["叫", "a"]); let res = decoder .decode_chain(vec!["<0xE5>".into(), "<0x8f>".into(), "a".into()]) .unwrap(); assert_eq!(res, vec!["�", "�", "a"]); } }
tokenizers/tokenizers/src/decoders/byte_fallback.rs/0
{ "file_path": "tokenizers/tokenizers/src/decoders/byte_fallback.rs", "repo_id": "tokenizers", "token_count": 1938 }
227
use super::{ lattice::Lattice, trainer::UnigramTrainer, trie::{Trie, TrieBuilder}, }; use crate::tokenizer::{Model, Result, Token}; use crate::utils::cache::Cache; use std::collections::HashMap; use std::convert::TryInto; use std::fs::read_to_string; use std::path::{Path, PathBuf}; type TokenMap = HashMap<String, u32>; type Vocab = Vec<(String, f64)>; /// A `Unigram` model to encode sentences. pub struct Unigram { token_to_ids: TokenMap, pub(crate) vocab: Vocab, cache: Cache<String, Vec<String>>, trie: Trie<u8>, pub min_score: f64, pub(super) unk_id: Option<usize>, pub(super) bos_id: usize, pub(super) eos_id: usize, fuse_unk: bool, is_optimized: bool, byte_fallback: bool, } impl PartialEq for Unigram { fn eq(&self, other: &Self) -> bool { self.unk_id == other.unk_id && self.vocab == other.vocab } } impl Clone for Unigram { // `Clone` can't be derive because it's not implemented for `Cache`. // To keep things simple when we clone, the new Unigram will start with a fresh cache. fn clone(&self) -> Self { let fresh_cache = self.cache.fresh(); Self { vocab: self.vocab.clone(), cache: fresh_cache, token_to_ids: self.token_to_ids.clone(), trie: self.trie.clone(), min_score: self.min_score, unk_id: self.unk_id, bos_id: self.bos_id, eos_id: self.eos_id, fuse_unk: self.fuse_unk, is_optimized: self.is_optimized, byte_fallback: self.byte_fallback, } } } impl std::fmt::Debug for Unigram { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { fmt.debug_struct("Unigram") .field("vocab", &self.vocab.len()) .field("unk_id", &self.unk_id) .field("byte_fallback", &self.byte_fallback) .finish() } } static K_UNK_PENALTY: f64 = 10.0; #[derive(thiserror::Error, Debug)] pub enum UnigramError { #[error("The vocabulary is empty but at least <unk> is needed")] EmptyVocabulary, #[error("The `unk_id` is larger than vocabulary size")] UnkIdNotInVocabulary, #[error("Encountered an unknown token but `unk_id` is missing")] MissingUnkId, } impl Default for Unigram { fn default() -> Self { let vocab = vec![("<unk>".to_string(), 0.0)]; Self::from(vocab, Some(0), false).unwrap() } } impl Unigram { /// Create a `Unigram` model from a given vocabulary. /// Vocabulary are the various tokens and their associated score which is a sort of a logprob of /// their frequency, which will enable tokenization and sampling. /// unk_id, is the index within the vocabulary. /// For now `Unigram` *requires* at least `unk` because we might find a never seen char. /// Further versions might allow that part to be hidden. 
pub fn from( vocab: Vec<(String, f64)>, unk_id: Option<usize>, byte_fallback: bool, ) -> Result<Self> { let n = vocab.len(); let mut token_to_ids: TokenMap = HashMap::new(); let mut builder = TrieBuilder::default(); if let Some(unk_id) = unk_id { if vocab.is_empty() { return Err(Box::new(UnigramError::EmptyVocabulary)); } if unk_id >= vocab.len() { return Err(Box::new(UnigramError::UnkIdNotInVocabulary)); } } let bos_id = n + 1; let eos_id = n + 2; let mut min_score = f64::INFINITY; for (id, (token, score)) in vocab.iter().enumerate() { token_to_ids.insert(token.to_string(), id as u32); let bytes: Vec<u8> = token.bytes().collect(); builder.push(&bytes); if score < &min_score { min_score = *score; } } let trie = builder.build(); let fuse_unk = true; let is_optimized = true; Ok(Self { vocab, token_to_ids, trie, min_score, bos_id, eos_id, unk_id, fuse_unk, cache: Cache::default(), is_optimized, byte_fallback, }) } #[cfg(test)] pub(super) fn set_fuse_unk(&mut self, fuse_unk: bool) { self.fuse_unk = fuse_unk; self.cache = self.cache.fresh(); } #[cfg(test)] pub(super) fn set_optimized(&mut self, is_optimized: bool) { self.is_optimized = is_optimized; } pub fn byte_fallback(&self) -> bool { self.byte_fallback } pub(super) fn len(&self) -> usize { self.vocab.len() } pub(super) fn populate_nodes(&self, lattice: &mut Lattice) { let unk_score = self.min_score - K_UNK_PENALTY; let len = lattice.len(); let mut begin_pos = 0; while begin_pos < len { let mblen = lattice.sentence[begin_pos..] .chars() .next() .unwrap() .len_utf8(); let mut has_single_node = false; for bytes in self .trie .common_prefix_search(lattice.sentence.bytes().skip(begin_pos)) { let n = bytes.len(); let tok = String::from_utf8(bytes).unwrap(); let id = *self.token_to_ids.get(&tok).unwrap(); let item = &self.vocab[id as usize]; assert_eq!(item.0, tok); let score: f64 = item.1; lattice.insert(begin_pos, n, score, id.try_into().unwrap()); if !has_single_node && n == mblen { has_single_node = true; } } if !has_single_node { if let Some(unk_id) = self.unk_id { lattice.insert(begin_pos, mblen, unk_score, unk_id); } } begin_pos += mblen } } /// This functions take a String, and will encode it in a Vec of Strings, /// of the best tokenization available to the current model. /// ``` /// use tokenizers::models::unigram::Unigram; /// /// let pieces = vec![ /// ("<unk>".to_string(), 0.0), /// ("a".to_string(), 0.0), /// ("b".to_string(), 0.0), /// ("c".to_string(), 0.0), /// ("d".to_string(), 0.0), /// ("cd".to_string(), 1.0), /// ("ab".to_string(), 2.0), /// ("abc".to_string(), 5.0), /// ("abcd".to_string(), 10.0), /// ]; /// let model = Unigram::from(pieces, Some(0), false).unwrap(); /// let result = model.encode("abcdacdxx").unwrap(); /// assert_eq!(result, vec!["abcd", "a", "cd", "xx"]); /// ``` pub fn encode(&self, sentence: &str) -> Result<Vec<String>> { if sentence.is_empty() { return Ok(vec![]); } if let Some(result) = self.cache.get(sentence) { Ok(result.to_vec()) } else { let result = if self.is_optimized { self.encode_optimized(sentence)? } else { self.encode_unoptimized(sentence)? }; self.cache.set(sentence.to_owned(), result.clone()); Ok(result) } } fn encode_optimized(&self, sentence: &str) -> Result<Vec<String>> { // https://github.com/google/sentencepiece/blob/d48247191a6d50e469ed1a4a36e877befffd1851/src/unigram_model.cc#L600 #[derive(Debug, Clone)] struct BestPathNode { /// The vocab id. (maybe UNK) id: usize, /// The total score of the best path ending at this node. 
best_path_score: f64, /// The starting position (in utf-8) of this node. The entire best /// path can be constructed by backtracking along this link. starts_at: Option<usize>, } impl Default for BestPathNode { fn default() -> Self { Self { id: 0, best_path_score: 0.0, starts_at: None, } } } let size = sentence.len(); let unk_score = self.min_score - K_UNK_PENALTY; let mut best_path_ends_at = vec![BestPathNode::default(); size + 1]; let mut starts_at = 0; while starts_at < size { let best_path_score_till_here = best_path_ends_at[starts_at].best_path_score; let mut has_single_node = false; let mblen = sentence[starts_at..].chars().next().unwrap().len_utf8(); for tok_bytes in self .trie .common_prefix_search(sentence.bytes().skip(starts_at)) { let key_pos = starts_at + tok_bytes.len(); let token: String = String::from_utf8(tok_bytes).unwrap(); let target_node = &mut best_path_ends_at[key_pos]; let length = key_pos - starts_at; let id = self.token_to_ids.get(&token).unwrap(); let score = self.vocab.get(*id as usize).unwrap().1; let candidate_best_path_score = score + best_path_score_till_here; if target_node.starts_at.is_none() || candidate_best_path_score > target_node.best_path_score { target_node.best_path_score = candidate_best_path_score; target_node.starts_at = Some(starts_at); target_node.id = *id as usize; } if !has_single_node && length == mblen { has_single_node = true; } } if !has_single_node { let target_node = &mut best_path_ends_at[starts_at + mblen]; let candidate_best_path_score = unk_score + best_path_score_till_here; if target_node.starts_at.is_none() || candidate_best_path_score > target_node.best_path_score { target_node.best_path_score = candidate_best_path_score; target_node.starts_at = Some(starts_at); target_node.id = self.unk_id.ok_or(UnigramError::MissingUnkId)?; } } starts_at += mblen } let mut ends_at = size; let mut results: Vec<String> = vec![]; let mut token = vec![]; while ends_at > 0 { let node = &best_path_ends_at[ends_at]; let starts_at = node.starts_at.unwrap(); if self.fuse_unk && self.unk_id.is_some() && node.id == self.unk_id.ok_or(UnigramError::MissingUnkId)? { token.push( String::from_utf8(sentence[starts_at..ends_at].as_bytes().to_vec()).unwrap(), ); } else { if !token.is_empty() { token.reverse(); results.push(token.concat()); token = vec![]; } results.push( String::from_utf8(sentence[starts_at..ends_at].as_bytes().to_vec()).unwrap(), ); } ends_at = starts_at; } if !token.is_empty() { token.reverse(); results.push(token.concat()); } results.reverse(); Ok(results) } fn encode_unoptimized(&self, sentence: &str) -> Result<Vec<String>> { let mut lattice = Lattice::from(sentence, self.bos_id, self.eos_id); self.populate_nodes(&mut lattice); if self.fuse_unk { let mut results = vec![]; let mut token = String::new(); for node in lattice.viterbi().iter() { let item = lattice.piece(&node.borrow()); if node.borrow().id == self.unk_id.ok_or(UnigramError::MissingUnkId)? { token.push_str(&item); } else { if !token.is_empty() { results.push(token); token = String::new(); } results.push(item.to_string()); } } if !token.is_empty() { results.push(token); } Ok(results) } else { Ok(lattice.tokens()) } } /// Iterate of vocabulary of the model as a pair of `(token, score)`. pub fn iter(&self) -> UnigramIterator { UnigramIterator { model: self, i: 0 } } /// Loads a SentencePiece output model after being trained by tokenizers. /// After that you can use the model with tokenizers library. 
/// ```no_run /// use tokenizers::models::unigram::Unigram; /// use std::path::Path; /// /// let model = Unigram::load("mymodel-unigram.json").unwrap(); /// ``` pub fn load<P: AsRef<Path>>(path: P) -> Result<Unigram> { let string = read_to_string(path)?; Ok(serde_json::from_str(&string)?) } } /// Iterator to iterate of vocabulary of the model, and their relative score. pub struct UnigramIterator<'a> { model: &'a Unigram, i: usize, } impl<'a> Iterator for UnigramIterator<'a> { type Item = &'a (String, f64); fn next(&mut self) -> Option<Self::Item> { let i = self.i; if i < self.model.len() { let r = Some(&self.model.vocab[i]); self.i += 1; r } else { None } } } impl Model for Unigram { type Trainer = UnigramTrainer; fn get_vocab(&self) -> HashMap<String, u32> { self.token_to_ids.clone() } fn get_vocab_size(&self) -> usize { self.vocab.len() } fn tokenize(&self, sentence: &str) -> Result<Vec<Token>> { let str_tokens = self.encode(sentence)?; let mut offset = 0; let mut tokens = Vec::with_capacity(str_tokens.len()); for string in str_tokens { let len = string.len(); let offsets = (offset, offset + len); let id: u32 = match self.token_to_ids.get(&string) { Some(id) => *id, None => { if self.byte_fallback { let byte_tokens: Option<Vec<_>> = string .bytes() .map(|byte| -> Option<Token> { let byte_string = format!("<0x{:02X}>", byte); let id = self.token_to_ids.get(&byte_string); id.map(|id| Token::new(*id, byte_string, (offset, offset + len))) }) .collect(); if let Some(byte_tokens) = byte_tokens { for token in byte_tokens { tokens.push(token); } offset += len; continue; } } self.unk_id.ok_or(UnigramError::MissingUnkId)? as u32 } }; offset += len; tokens.push(Token::new(id, string, offsets)); } Ok(tokens) } fn token_to_id(&self, token: &str) -> Option<u32> { self.token_to_ids.get(token).copied() } fn id_to_token(&self, id: u32) -> Option<String> { self.vocab.get(id as usize).map(|item| item.0.clone()) } fn save(&self, folder: &Path, name: Option<&str>) -> Result<Vec<PathBuf>> { let name = match name { Some(name) => format!("{}-unigram.json", name), None => "unigram.json".to_string(), }; let mut fullpath = PathBuf::new(); fullpath.push(folder); fullpath.push(name); let string = serde_json::to_string_pretty(self)?; std::fs::write(&fullpath, string)?; Ok(vec![fullpath]) } fn get_trainer(&self) -> Self::Trainer { UnigramTrainer::default() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_populate_nodes_unk() { let pieces = vec![("<unk>".to_string(), 0.0)]; let model = Unigram::from(pieces, Some(0), false).unwrap(); let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); assert_eq!(lattice.begin_nodes[0].len(), 1); assert_eq!(lattice.begin_nodes[1].len(), 1); assert_eq!(lattice.begin_nodes[2].len(), 1); assert_eq!(lattice.begin_nodes[0][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[1][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2); assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 3); assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 4); } #[test] fn test_populate_nodes() { let pieces = vec![ ("<unk>".to_string(), 0.0), ("a".to_string(), 0.1), ("b".to_string(), 0.2), ("ab".to_string(), 0.3), ("bc".to_string(), 0.4), ]; let model = Unigram::from(pieces, Some(0), false).unwrap(); let mut lattice = Lattice::from("abc", model.bos_id, model.eos_id); model.populate_nodes(&mut lattice); assert_eq!(lattice.begin_nodes[0].len(), 2); // a, ab 
assert_eq!(lattice.begin_nodes[1].len(), 2); // b, bc assert_eq!(lattice.begin_nodes[2].len(), 1); // c(unk) // Id is the vocabulary id from Unigram model // node_id is simply the rank of the given node in the lattice. assert_eq!(lattice.begin_nodes[0][0].borrow().id, 1); assert_eq!(lattice.begin_nodes[0][1].borrow().id, 3); assert_eq!(lattice.begin_nodes[1][0].borrow().id, 2); assert_eq!(lattice.begin_nodes[1][1].borrow().id, 4); assert_eq!(lattice.begin_nodes[2][0].borrow().id, 0); assert_eq!(lattice.begin_nodes[0][0].borrow().node_id, 2); assert_eq!(lattice.begin_nodes[0][1].borrow().node_id, 3); assert_eq!(lattice.begin_nodes[1][0].borrow().node_id, 4); assert_eq!(lattice.begin_nodes[1][1].borrow().node_id, 5); assert_eq!(lattice.begin_nodes[2][0].borrow().node_id, 6); } #[test] fn test_encode() { let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("a".to_string(), 0.0), ("b".to_string(), 0.0), ("c".to_string(), 0.0), ("d".to_string(), 0.0), ("cd".to_string(), 1.0), ("ab".to_string(), 2.0), ("abc".to_string(), 5.0), ("abcd".to_string(), 10.0), ]; let model = Unigram::from(sentencepieces, Some(0), false).unwrap(); let result = model.encode("abcd").unwrap(); assert_eq!(result, vec!["abcd"]); } #[test] fn test_encode2() { let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("ab".to_string(), 0.0), ("cd".to_string(), -0.1), ("abc".to_string(), -0.2), ("a".to_string(), -0.3), ("b".to_string(), -0.4), ("c".to_string(), -0.5), ("ABC".to_string(), -0.5), ("abcdabcd".to_string(), 20.0), // User defined just max the scores. ("q".to_string(), 20.5), ("r".to_string(), 20.5), ("qr".to_string(), -0.5), ]; let mut model = Unigram::from(sentencepieces, Some(0), false).unwrap(); for is_optimized in &[true, false] { model.set_optimized(*is_optimized); println!("IsOptimized {:?}", is_optimized); assert_eq!(model.encode("abc").unwrap(), vec!["abc"]); assert_eq!(model.encode("AB").unwrap(), vec!["AB"]); model.set_fuse_unk(false); assert_eq!(model.encode("AB").unwrap(), vec!["A", "B"]); model.set_fuse_unk(true); assert_eq!(model.encode("AB").unwrap(), vec!["AB"]); assert_eq!(model.encode("abcd").unwrap(), vec!["ab", "cd"]); assert_eq!(model.encode("abcc").unwrap(), vec!["abc", "c"]); assert_eq!( model.encode("xabcabaabcdd").unwrap(), vec!["x", "abc", "ab", "a", "ab", "cd", "d"] ); model.set_fuse_unk(false); assert_eq!( model.encode("xyz東京").unwrap(), vec!["x", "y", "z", "東", "京"] ); model.set_fuse_unk(true); assert_eq!(model.encode("xyz東京").unwrap(), vec!["xyz東京"]); // User encoded in original version assert_eq!(model.encode("ABC").unwrap(), vec!["ABC"]); assert_eq!(model.encode("abABCcd").unwrap(), vec!["ab", "ABC", "cd"]); assert_eq!( model.encode("ababcdabcdcd").unwrap(), vec!["ab", "abcdabcd", "cd"] ); assert_eq!(model.encode("abqrcd").unwrap(), vec!["ab", "q", "r", "cd"]); } } #[test] fn test_unigram_bytefallback() { // In [97]: processor.encode_as_pieces("⅐⅛⅑ ") // Out[97]: ['▁', '<0xE2>', '<0x85>', '<0x90>', '⅛', '<0xE2>', '<0x85>', '<0x91>', '▁'] let sentencepieces = vec![ ("<unk>".to_string(), 0.0), ("<0xC3>".to_string(), -0.01), ("<0xA9>".to_string(), -0.03), ]; let unigram = Unigram::from(sentencepieces, Some(0), true).unwrap(); let tokens: Vec<Token> = unigram.tokenize("é").unwrap(); assert_eq!( tokens, [ Token { id: 1, value: "<0xC3>".to_string(), offsets: (0, 2) }, Token { id: 2, value: "<0xA9>".to_string(), offsets: (0, 2) } ] ); let tokens = unigram.tokenize("?é").unwrap(); assert_eq!(tokens[0].id, 0); } }
tokenizers/tokenizers/src/models/unigram/model.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/unigram/model.rs", "repo_id": "tokenizers", "token_count": 11900 }
228
use crate::tokenizer::{NormalizedString, Normalizer, Result}; use crate::utils::macro_rules_attribute; #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFD; impl Normalizer for NFD { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfd(); Ok(()) } } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFKD; impl Normalizer for NFKD { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfkd(); Ok(()) } } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFC; impl Normalizer for NFC { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfc(); Ok(()) } } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct NFKC; impl Normalizer for NFKC { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { normalized.nfkc(); Ok(()) } } fn do_nmt(normalized: &mut NormalizedString) { // Ascii Control characters normalized .filter(|c| { !matches!( c as u32, 0x0001..=0x0008 | 0x000B | 0x000E..=0x001F | 0x007F | 0x008F | 0x009F ) }) // Other code points considered as whitespace. .map(|c| match c as u32 { 0x0009 => ' ', 0x000A => ' ', 0x000C => ' ', 0x000D => ' ', 0x1680 => ' ', 0x200B..=0x200F => ' ', 0x2028 => ' ', 0x2029 => ' ', 0x2581 => ' ', 0xFEFF => ' ', 0xFFFD => ' ', _ => c, }); } #[derive(Default, Copy, Clone, Debug)] #[macro_rules_attribute(impl_serde_type!)] pub struct Nmt; impl Normalizer for Nmt { fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> { do_nmt(normalized); Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn test_nfkc() { let original = "\u{fb01}".to_string(); let normalized = "fi".to_string(); let mut n = NormalizedString::from(original.clone()); NFKC.normalize(&mut n).unwrap(); assert_eq!( n, NormalizedString::new(original, normalized, vec![(0, 3), (0, 3)], 0) ); assert_eq!(n.alignments_original(), vec![(0, 2), (0, 2), (0, 2)]); } }
tokenizers/tokenizers/src/normalizers/unicode.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/unicode.rs", "repo_id": "tokenizers", "token_count": 1317 }
229
pub mod bert; pub mod roberta; pub mod sequence; pub mod template; // Re-export these as processors pub use super::pre_tokenizers::byte_level; use serde::{Deserialize, Serialize}; use crate::pre_tokenizers::byte_level::ByteLevel; use crate::processors::bert::BertProcessing; use crate::processors::roberta::RobertaProcessing; use crate::processors::sequence::Sequence; use crate::processors::template::TemplateProcessing; use crate::{Encoding, PostProcessor, Result}; #[derive(Serialize, Deserialize, PartialEq, Debug, Clone, Eq)] #[serde(untagged)] pub enum PostProcessorWrapper { // Roberta must be before Bert for deserialization (serde does not validate tags) Roberta(RobertaProcessing), Bert(BertProcessing), ByteLevel(ByteLevel), Template(TemplateProcessing), Sequence(Sequence), } impl PostProcessor for PostProcessorWrapper { fn added_tokens(&self, is_pair: bool) -> usize { match self { Self::Bert(bert) => bert.added_tokens(is_pair), Self::ByteLevel(bl) => bl.added_tokens(is_pair), Self::Roberta(roberta) => roberta.added_tokens(is_pair), Self::Template(template) => template.added_tokens(is_pair), Self::Sequence(bl) => bl.added_tokens(is_pair), } } fn process_encodings( &self, encodings: Vec<Encoding>, add_special_tokens: bool, ) -> Result<Vec<Encoding>> { match self { Self::Bert(bert) => bert.process_encodings(encodings, add_special_tokens), Self::ByteLevel(bl) => bl.process_encodings(encodings, add_special_tokens), Self::Roberta(roberta) => roberta.process_encodings(encodings, add_special_tokens), Self::Template(template) => template.process_encodings(encodings, add_special_tokens), Self::Sequence(bl) => bl.process_encodings(encodings, add_special_tokens), } } } impl_enum_from!(BertProcessing, PostProcessorWrapper, Bert); impl_enum_from!(ByteLevel, PostProcessorWrapper, ByteLevel); impl_enum_from!(RobertaProcessing, PostProcessorWrapper, Roberta); impl_enum_from!(TemplateProcessing, PostProcessorWrapper, Template); impl_enum_from!(Sequence, PostProcessorWrapper, Sequence); #[cfg(test)] mod tests { use super::*; #[test] fn deserialize_bert_roberta_correctly() { let roberta = RobertaProcessing::default(); let roberta_r = r#"{ "type":"RobertaProcessing", "sep":["</s>",2], "cls":["<s>",0], "trim_offsets":true, "add_prefix_space":true }"# .replace(char::is_whitespace, ""); assert_eq!(serde_json::to_string(&roberta).unwrap(), roberta_r); assert_eq!( serde_json::from_str::<PostProcessorWrapper>(&roberta_r).unwrap(), PostProcessorWrapper::Roberta(roberta) ); let bert = BertProcessing::default(); let bert_r = r#"{"type":"BertProcessing","sep":["[SEP]",102],"cls":["[CLS]",101]}"#; assert_eq!(serde_json::to_string(&bert).unwrap(), bert_r); assert_eq!( serde_json::from_str::<PostProcessorWrapper>(bert_r).unwrap(), PostProcessorWrapper::Bert(bert) ); } }
tokenizers/tokenizers/src/processors/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/processors/mod.rs", "repo_id": "tokenizers", "token_count": 1426 }
230
use crate::tokenizer::pattern::Pattern; use crate::{Offsets, Result}; use onig::Regex; use std::error::Error; #[derive(Debug)] pub struct SysRegex { regex: Regex, } impl SysRegex { pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> onig::FindMatches<'r, 't> { self.regex.find_iter(inside) } pub fn new( regex_str: &str, ) -> std::result::Result<Self, Box<dyn Error + Send + Sync + 'static>> { Ok(Self { regex: Regex::new(regex_str)?, }) } } impl Pattern for &Regex { fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> { if inside.is_empty() { return Ok(vec![((0, 0), false)]); } let mut prev = 0; let mut splits = Vec::with_capacity(inside.len()); for (start, end) in self.find_iter(inside) { if prev != start { splits.push(((prev, start), false)); } splits.push(((start, end), true)); prev = end; } if prev != inside.len() { splits.push(((prev, inside.len()), false)) } Ok(splits) } }
tokenizers/tokenizers/src/utils/onig.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/onig.rs", "repo_id": "tokenizers", "token_count": 571 }
231
[run] source=transformers omit = # skip conversion scripts from testing for now */convert_* */__main__.py [report] exclude_lines = pragma: no cover raise except register_parameter
transformers/.coveragerc/0
{ "file_path": "transformers/.coveragerc", "repo_id": "transformers", "token_count": 81 }
232
FROM rocm/dev-ubuntu-22.04:5.6 LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive ARG PYTORCH='2.1.1' ARG TORCH_VISION='0.16.1' ARG TORCH_AUDIO='2.1.1' ARG ROCM='5.6' RUN apt update && \ apt install -y --no-install-recommends \ libaio-dev \ git \ # These are required to build deepspeed. python3-dev \ python-is-python3 \ rocrand-dev \ rocthrust-dev \ hipsparse-dev \ hipblas-dev \ rocblas-dev && \ apt clean && \ rm -rf /var/lib/apt/lists/* RUN python3 -m pip install --no-cache-dir --upgrade pip ninja "pydantic<2" RUN python3 -m pip uninstall -y apex torch torchvision torchaudio RUN python3 -m pip install torch==$PYTORCH torchvision==$TORCH_VISION torchaudio==$TORCH_AUDIO --index-url https://download.pytorch.org/whl/rocm$ROCM --no-cache-dir # Pre-build DeepSpeed, so it's ready for testing (to avoid timeout) RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache-dir -v --disable-pip-version-check 2>&1 ARG REF=main WORKDIR / # Invalidate docker cache from here if new commit is available. ADD https://api.github.com/repos/huggingface/transformers/git/refs/heads/main version.json RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF RUN python3 -m pip install --no-cache-dir ./transformers[accelerate,testing,sentencepiece,sklearn] # When installing in editable mode, `transformers` is not recognized as a package. # This line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop RUN python3 -c "from deepspeed.launcher.runner import main"
transformers/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile", "repo_id": "transformers", "token_count": 639 }
233
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# How can I add a model to 🤗 Transformers?

The 🤗 Transformers library is often able to offer new models thanks to community contributions. But this can be a challenging project and requires an in-depth knowledge of the 🤗 Transformers library and of the model to implement. At Hugging Face, we are trying to empower more of the community to actively add models, and we have put together this guide to walk you through the process of adding a PyTorch model (make sure you have [PyTorch installed](https://pytorch.org/get-started/locally/)).

<Tip>

If you are interested in implementing a TensorFlow model, take a look at the [How to convert a 🤗 Transformers model to TensorFlow](add_tensorflow_model) guide!

</Tip>

Along the way, you will:

- get insights into open-source best practices
- understand the design principles behind one of the most popular deep learning libraries
- learn how to efficiently test large models
- learn how to integrate Python utilities like `black`, `ruff`, and `make fix-copies` to ensure clean and readable code

A member of the Hugging Face team will be there to help you along the way, so you are never alone. 🤗 ❤️

To get started, open a [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) issue for the model you would like to see in 🤗 Transformers. If you are not especially picky about contributing a specific model, you can filter by the [New model label](https://github.com/huggingface/transformers/labels/New%20model) to see if there are any unclaimed model requests and work on them.

Once you have opened a new model request, the first step is to get familiar with 🤗 Transformers if you aren't already!

## General overview of 🤗 Transformers

First, you should get a general overview of 🤗 Transformers. 🤗 Transformers is a very opinionated library, so it is possible that you disagree with some of the library's philosophies or design choices. From our experience, however, we found that the fundamental design choices and philosophies of the library are crucial to efficiently scaling 🤗 Transformers while keeping maintenance costs at a reasonable level.

A good first starting point to better understand the library is to read the [documentation of our philosophy](Philosophie).
As a result of the way we work, there are some choices that we try to apply to all models:

- Composition is generally favored over abstraction
- Duplicating code is not always bad if it strongly improves the readability or accessibility of a model
- Model files are as self-contained as possible, so that when you read the code of a specific model, you ideally only have to look into the respective `modeling_....py` file.

In our opinion, the library's code is not just a means to provide a product, *e.g.* the ability to use BERT for inference, but also the very product that we want to improve. Hence, when adding a model, the user is not only the person who will use your model, but also everybody who will read your code, try to understand it, and possibly improve it.

With this in mind, let's go a bit deeper into the general library design.

### Overview of models

To successfully add a model, it is important to understand the interaction between your model and its config, [`PreTrainedModel`], and [`PretrainedConfig`]. For exemplary purposes, we will call the model to be added to 🤗 Transformers `BrandNewBert`. Let's take a look:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/>

As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits from `BrandNewBertPreTrainedModel`, which in turn inherits from [`PreTrainedModel`], and that's it. As a general rule, we want to make sure that a new model only depends on [`PreTrainedModel`]. The important functionalities that are automatically provided to every new model are [`~PreTrainedModel.from_pretrained`] and [`~PreTrainedModel.save_pretrained`], which are used for serialization and deserialization. All of the other important functionalities, such as `BrandNewBertModel.forward`, should be completely defined in the new `modeling_brand_new_bert.py` script.

Next, we want to make sure that a model with a specific head layer, such as `BrandNewBertForMaskedLM`, does not inherit from `BrandNewBertModel`, but rather uses `BrandNewBertModel` as a component that can be called in the forward pass, to keep the level of abstraction low.

Every new model requires a configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute in [`PreTrainedModel`] and can therefore be accessed via the `config` attribute for all classes inheriting from `BrandNewBertPreTrainedModel`:

```python
model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")
model.config  # model has access to its config
```

Similar to the model, the configuration inherits basic serialization and deserialization functionalities from [`PretrainedConfig`]. Note that the configuration and the model are always serialized into two different formats - the model into a *pytorch_model.bin* file and the configuration into a *config.json* file. Calling [`~PreTrainedModel.save_pretrained`] will automatically call [`~PretrainedConfig.save_pretrained`], so that both the model and the configuration are saved.
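As a quick illustration of this round trip (a minimal sketch; `BrandNewBert` is just the placeholder name used throughout this guide):

```python
model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")

# Writes pytorch_model.bin and config.json into the target folder.
model.save_pretrained("./brand_new_bert")

# Reloading restores both the weights and the configuration.
reloaded = BrandNewBertModel.from_pretrained("./brand_new_bert")
assert reloaded.config.to_dict() == model.config.to_dict()
```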
### Code style

When coding your new model, keep in mind that Transformers is an opinionated library and we have a few quirks of our own regarding how code should be written :-)

1. The forward pass of your model should be fully written in the modeling file while being completely independent of other models in the library. If you want to reuse a block from another model, copy the code and paste it with a `# Copied from` comment on top (see [here](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) for a good example and [here](pr_checks#check-copies) for more documentation on Copied from).
2. The code should be fully understandable, even by a non-native English speaker. This means you should pick descriptive variable names and avoid abbreviations. As an example, `activation` is preferred over `act`. One-letter variable names are strongly discouraged unless they are an index in a for loop.
3. More generally, we prefer longer explicit code over short magical code.
4. Avoid subclassing `nn.Sequential` in PyTorch. Subclass `nn.Module` and write the forward pass explicitly, so that anyone using your code can quickly debug it by adding print statements or breakpoints (a minimal sketch follows below).
5. Your function signature should be type-annotated. For the rest, good variable names are far more readable and understandable than type annotations.
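To make points 4 and 5 concrete, here is a minimal sketch of the style we aim for; the layer itself is made up purely for illustration and is not part of any existing model:

```python
import torch
from torch import nn


class BrandNewBertIntermediate(nn.Module):
    """Illustrative feed-forward block written in the preferred style."""

    def __init__(self, hidden_size: int, intermediate_size: int):
        super().__init__()
        self.dense = nn.Linear(hidden_size, intermediate_size)
        self.activation = nn.GELU()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # The forward pass is written out explicitly, so a print statement or
        # breakpoint can be dropped between any two lines while debugging.
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation(hidden_states)
        return hidden_states
```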
You should spend more time on creating an efficient debugging environment rather than trying to understand all theoretical aspects of the model in the paper.
- Ask for help when you're stuck! Models are the core component of 🤗 Transformers, so we at Hugging Face are more than happy to help you at every step of adding your model. Don't hesitate to ask if you notice you are not making progress.

In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers.

The following list is a summary of everything that has to be done to add a model and can be used by you as a to-do list:

☐ (Optional) Understood the model's theoretical aspects<br>
☐ Prepared the 🤗 Transformers dev environment<br>
☐ Set up a debugging environment of the original repository<br>
☐ Created a script that successfully runs the `forward()` pass using the original repository and checkpoint<br>
☐ Successfully added the model skeleton to 🤗 Transformers<br>
☐ Successfully converted the original checkpoint to the 🤗 Transformers checkpoint<br>
☐ Successfully ran the `forward()` pass in 🤗 Transformers that gives an identical output to the original checkpoint<br>
☐ Finished the model tests in 🤗 Transformers<br>
☐ Successfully added the tokenizer in 🤗 Transformers<br>
☐ Ran end-to-end integration tests<br>
☐ Finished the docs<br>
☐ Uploaded the model weights to the Hub<br>
☐ Submitted the pull request<br>
☐ (Optional) Added a demo notebook

To begin with, we usually recommend starting by getting a good theoretical understanding of `BrandNewBert`. However, if you prefer to understand the theoretical aspects of the model *on-the-job*, then it is totally fine to directly dive into `BrandNewBert`'s code base. This option might suit you better if your engineering skills are better than your theoretical skills, if you have trouble understanding `BrandNewBert`'s paper, or if you simply enjoy programming much more than reading scientific papers.

### 1. (Optional) Theoretical aspects of BrandNewBert

You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large sections of the paper that are difficult to understand. If this is the case, that's fine - don't worry! The goal is not to get a deep theoretical understanding of the paper, but rather to extract the necessary information required to effectively re-implement the model in 🤗 Transformers. That being said, you don't have to spend too much time on the theoretical aspects, but rather focus on the practical ones, namely:

- What type of model is *brand_new_bert*? A BERT-like encoder-only model? A GPT2-like decoder-only model? A BART-like encoder-decoder model? Take a look at the [model_summary](model_summary) if you're not familiar with the differences between those.
- What are the applications of *brand_new_bert*? Text classification? Text generation? Seq2Seq tasks, *e.g.,* summarization?
- What is the novel feature of the model that makes it different from BERT/GPT-2/BART?
- Which of the already existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar to *brand_new_bert*?
- What type of tokenizer is used? A sentencepiece tokenizer? A wordpiece tokenizer? Is it the same tokenizer as used for BERT or BART?

After you feel like you have gotten a good overview of the architecture of the model, you might want to write to the Hugging Face team with any questions you might have. This might include questions regarding the model's architecture, its attention layer, etc. We will be more than happy to help you.

### 2. Next prepare your environment

1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your `transformers` fork to your local disk, and add the base repository as a remote:

    ```bash
    git clone https://github.com/[your Github handle]/transformers.git
    cd transformers
    git remote add upstream https://github.com/huggingface/transformers.git
    ```

3. Set up a development environment, for instance by running the following command:

    ```bash
    python -m venv .env
    source .env/bin/activate
    pip install -e ".[dev]"
    ```

    Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a failure with this command. If that's the case, make sure to install the Deep Learning framework you are working with (PyTorch, TensorFlow and/or Flax) and then do:

    ```bash
    pip install -e ".[quality]"
    ```

    which should be enough for most use cases. You can then return to the parent directory

    ```bash
    cd ..
    ```

4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, please follow the instructions on https://pytorch.org/get-started/locally/.

    **Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient.

5. To port *brand_new_bert*, you will also need access to its original repository:

    ```bash
    git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
    cd brand_new_bert
    pip install -e .
    ```

Now you have set up a development environment to port *brand_new_bert* to 🤗 Transformers.

### 3.-4. Run a pretrained checkpoint using the original repository

At first, you will work on the original *brand_new_bert* repository. Often, the original implementation is very "researchy": documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement *brand_new_bert*. At Hugging Face, one of our main goals is to *make people stand on the shoulders of giants*, which translates here very well into taking a working model and rewriting it to make it as **accessible, user-friendly, and beautiful** as possible.
This is the most important motivation for re-implementing models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**.

You should start by diving into the original repository.

Successfully running the official pretrained model in the original repository is often **the most difficult** step. From our experience, it is very important to spend some time getting familiar with the original code base. You need to figure out the following:

- Where to find the pretrained weights?
- How to load the pretrained weights into the corresponding model?
- How to run the tokenizer independently of the model?
- Trace one forward pass so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions.
- You have to be able to locate the important components of the model: Where is the model's class? Are there model sub-classes, *e.g.* EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, *e.g.* *self-attention*, *cross-attention*...?
- How can you debug the model in the original environment of the repo? Do you have to add *print* statements, can you work with an interactive debugger like *ipdb*, or should you use an efficient IDE to debug the model, like PyCharm?

It is very important that, before you start the porting process, you can **efficiently** debug code in the original repository! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request, in the original repository. The maintainers of that repository are most likely very happy about someone looking into their code!

At this point, it is really up to you which debugging environment and strategy you prefer to use to debug the original model. We strongly advise against setting up a costly GPU environment; simply work on a CPU, both when first diving into the original repository and also when starting to write the 🤗 Transformers implementation of the model. Only at the very end, when the model has already been successfully ported to 🤗 Transformers, should you verify that the model also works as expected on a GPU.

In general, there are two possible debugging environments for running the original model:

- [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb)
- Local Python scripts.

Jupyter notebooks have the advantage that they allow for cell-by-cell execution, which can be helpful to better separate logical components from one another and to have faster debugging cycles, since intermediate results can be stored. Also, notebooks are often easier to share with other contributors, which might be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend working with them.
The obvious disadvantage of Jupyter notebooks is that, if you are not used to working with them, you will have to spend some time adjusting to the new programming environment and you might not be able to use your usual debugging tools anymore, such as `ipdb`.

For each code base, a good first step is always to load a **small** pretrained checkpoint and to be able to reproduce a single forward pass using a dummy integer vector of input IDs as input. Such a script could look like this (in pseudocode):

```python
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = [0, 4, 5, 2, 3, 7, 9]  # vector of input ids
original_output = model.predict(input_ids)
```

Next, regarding the debugging strategy, there are generally a few to choose from:

- Decompose the original model into many small testable components and run a forward pass on each of them for verification
- Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on those, and use intermediate print statements or breakpoints for verification

Again, it is up to you which strategy to choose. Often, one or the other is advantageous depending on the original code base.

If the original code base allows you to decompose the model into smaller sub-components, *e.g.* if the original code base can easily be run in eager mode, it is usually worth the effort to do so. There are some important advantages to taking the more difficult road in the beginning:

- When comparing the original model to the Hugging Face implementation at a later stage, you can automatically verify for each component individually that the corresponding component of the 🤗 Transformers implementation matches, instead of relying on a visual comparison via print statements
- It lets you decompose the big problem of porting a model into the smaller problems of porting individual components and thus structure your work better
- Separating the model into logically meaningful components helps you get a better overview of the model's design and thus understand the model better
- At a later stage, those component-by-component tests help you ensure that no regressions occur as you continue changing your code

[Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration tests for ELECTRA give a nice example of how this can be done.

However, if the original code base is very complex or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is the [T5's MeshTensorFlow](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow) library, which is very complex and does not offer a simple way to decompose the model into its sub-components. For such libraries, one often relies on verifying print statements.
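If the original model is written in PyTorch, one lightweight way to record intermediate activations for later comparison - without restructuring the original code - is to attach forward hooks. The sketch below assumes a PyTorch model; `original_model` and the module-name filters are purely illustrative:

```python
import torch

# Hypothetical: `original_model` is the PyTorch model loaded from the original repository.
captured = {}


def make_hook(name):
    def hook(module, inputs, output):
        # detach so the stored tensors don't keep the autograd graph alive
        captured[name] = output.detach().cpu() if torch.is_tensor(output) else output

    return hook


# The name filters are illustrative; inspect `original_model.named_modules()` to find the real ones.
for name, module in original_model.named_modules():
    if "embeddings" in name or "attention" in name:
        module.register_forward_hook(make_hook(name))

with torch.no_grad():
    _ = original_model(torch.tensor([[0, 4, 5, 2, 3, 7, 9]]))

# `captured` now maps module names to intermediate outputs that can later be
# compared against the corresponding components of the 🤗 Transformers implementation.
```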
Regardless of which strategy you choose, the recommended procedure is often the same: you should start by debugging the starting layers first and the ending layers last.

It is recommended that you retrieve the outputs, either by print statements or sub-component functions, of the following layers in the following order:

1. Retrieve the input IDs passed to the model
2. Retrieve the word embeddings
3. Retrieve the input of the first Transformer layer
4. Retrieve the output of the first Transformer layer
5. Retrieve the output of the following n - 1 Transformer layers
6. Retrieve the output of the whole BrandNewBert model

The input IDs should thereby consist of an array of integers, *e.g.* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`

The outputs of the following layers often consist of multi-dimensional float arrays and can look like this:

```
[[
 [-0.1465, -0.6501,  0.1993,  ...,  0.1451,  0.3430,  0.6024],
 [-0.4417, -0.5920,  0.3450,  ..., -0.3062,  0.6182,  0.7132],
 [-0.5009, -0.7122,  0.4548,  ..., -0.3662,  0.6091,  0.7648],
 ...,
 [-0.5613, -0.6332,  0.4324,  ..., -0.3792,  0.7372,  0.9288],
 [-0.5416, -0.6345,  0.4180,  ..., -0.3564,  0.6992,  0.9191],
 [-0.5334, -0.6403,  0.4271,  ..., -0.3339,  0.6533,  0.8694]]],
```

We expect that every model added to 🤗 Transformers passes a couple of integration tests, meaning that the original model and the reimplemented version in 🤗 Transformers have to give the exact same output up to a precision of 0.001! Since it is normal that the exact same model written in different libraries can give slightly different outputs depending on the library framework, we accept an error tolerance of 1e-3 (0.001). It is not enough if the model gives nearly the same output - the outputs have to be almost identical. Therefore, you will certainly compare the intermediate outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of *brand_new_bert*, in which case an **efficient** debugging environment of the original repository is absolutely important. Here is some advice to make your debugging environment as efficient as possible.

- Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components to retrieve intermediate values. Is the original repository written in Tensorflow 1? Then you might have to rely on TensorFlow print operations like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) to output intermediate values. Is the original repository written in Jax? Then make sure that the model is **not jitted** when running the forward pass, *e.g.* check out [this link](https://github.com/google/jax/issues/196).
- Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debugging cycle becomes. It is not efficient if your pretrained model is so big that your forward pass takes more than 10 seconds.
In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version of your model
- Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that **only** calls a single forward pass, *i.e.* the function that is often called `predict`, `evaluate`, `forward`, or `__call__`. You don't want to debug a function that calls `forward` multiple times, *e.g.* to generate text, like `autoregressive_sample` or `generate`.
- Try to separate the tokenization from the model's *forward* pass. If the original repository shows examples where you have to input a string, then try to find out where in the forward call the string input is changed to input IDs, and start from that point. This might mean that you possibly have to write a small script yourself or change the original code so that you can directly input the IDs instead of an input string.
- Make sure that the model in your debugging setup is **not** in training mode, which often causes the model to yield random outputs due to the multiple dropout layers in the model. Make sure that the forward pass in your debugging environment is **deterministic** so that the dropout layers are not used. Or use *transformers.utils.set_seed* if the old and new implementations are in the same framework.

The following section gives you more specific details/tips on how you can do this for *brand_new_bert*.

### 5.-14. Port BrandNewBert to 🤗 Transformers

Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers fork:

```bash
cd transformers
```

In the special case that you are adding a model whose architecture exactly matches the model architecture of an existing model, you only have to add a conversion script as described in [this section](#write-a-conversion-script). In this case, you can just reuse the whole model architecture of the already existing model.

Otherwise, let's start generating a new model. You have two choices here:

- `transformers-cli add-new-model-like` to add a new model like an existing one
- `transformers-cli add-new-model` to add a new model from our template (it will then look like BERT or Bart, depending on the type of model you select)

In both cases, you will be prompted with a questionnaire to fill in the basic information of your model. The second command requires `cookiecutter` to be installed; you can find more information on it [here](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model).
**Open a Pull Request on the main huggingface/transformers repo**

Before starting to adapt the automatically generated code, now is the time to open a "Work in progress (WIP)" pull request, *e.g.* "[WIP] Add *brand_new_bert*", in 🤗 Transformers, so that you and the Hugging Face team can work side-by-side on integrating the model into 🤗 Transformers.

You should do the following:

1. Create a branch with a descriptive name from your main branch

    ```bash
    git checkout -b add_brand_new_bert
    ```

2. Commit the automatically generated code:

    ```bash
    git add .
    git commit
    ```

3. Fetch and rebase to current main

    ```bash
    git fetch upstream
    git rebase upstream/main
    ```

4. Push the changes to your account using:

    ```bash
    git push -u origin a-descriptive-name-for-my-changes
    ```

5. Once you are satisfied, go to the webpage of your fork on GitHub. Click on "Pull request". Make sure to add the GitHub handles of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified about future changes.

6. Change the PR into a draft by clicking on "Convert to draft" on the right side of the GitHub pull request web page.

In the following, whenever you have made some progress, don't forget to commit your work and push it to your account so that it shows in the pull request. Additionally, you should make sure to update your work with the current main from time to time by doing:

```bash
git fetch upstream
git merge upstream/main
```

In general, all questions you might have regarding the model or your implementation should be asked in your PR and discussed/solved there. This way, the Hugging Face team will always be notified when you are committing new code or if you have a question. It is often very helpful to point the Hugging Face team to your added code so that they can better understand your problem or question.

To do so, go to the "Files changed" tab where you can see all of your changes, go to a line regarding which you want to ask a question, and click on the "+" symbol to add a comment. Whenever a question or problem has been solved, you can click on the "Resolve" button of the created comment.

In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions on GitHub in your PR. For some very general questions that are not very useful for the public, feel free to ping the Hugging Face team via Slack or email.

**5. Adapt the generated model's code for brand_new_bert**

At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in the generated files `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`.

Now you can finally start coding :).
The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will either have the same architecture as BERT, if it's an encoder-only model, or BART, if it's an encoder-decoder model. At this point, you should remind yourself of what you learned in the beginning about the theoretical aspects of the model: *How is the model different from BERT or BART?* Implement those changes, which often means changing the *self-attention* layer, the order of the normalization layer, etc. Again, it is often useful to look at the similar architecture of already existing models in Transformers to get a better feeling of how your model should be implemented.

**Note** that at this point, you don't have to be very sure that your code is fully correct or clean. Rather, it is advised to add a first *unclean*, copy-pasted version of the original code to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel like all the necessary code has been added. From our experience, it is much more efficient to quickly add a first version of the required code and to improve/correct the code iteratively with the conversion script as described in the next section. The only thing that has to work at this point is that you can instantiate the 🤗 Transformers implementation of *brand_new_bert*, *i.e.* the following command should work:

```python
from transformers import BrandNewBertModel, BrandNewBertConfig

model = BrandNewBertModel(BrandNewBertConfig())
```

The above command creates a model according to the default parameters defined in `BrandNewBertConfig()` with random weights, thus making sure that the `init()` methods of all components work.

Note that all random initialization should happen in the `_init_weights` method of your `BrandNewBertPreTrainedModel` class. It should initialize all leaf modules depending on the variables of the config. Here is an example with the BERT `_init_weights` method:

```py
def _init_weights(self, module):
    """Initialize the weights"""
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
```

You can have some more custom schemes if you need a special initialization for some modules. For instance, in `Wav2Vec2ForPreTraining`, the last two linear layers need to have the initialization of the regular PyTorch `nn.Linear`, but all other ones should use an initialization as above.
This is coded like this:

```py
def _init_weights(self, module):
    """Initialize the weights"""
    if isinstance(module, Wav2Vec2ForPreTraining):
        module.project_hid.reset_parameters()
        module.project_q.reset_parameters()
        module.project_hid._is_hf_initialized = True
        module.project_q._is_hf_initialized = True
    elif isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
```

The `_is_hf_initialized` flag is used internally to make sure we only initialize a submodule once. By setting it to `True` for `module.project_q` and `module.project_hid`, we make sure that the custom initialization we did is not overridden later on, i.e. the `_init_weights` function won't be applied to them.

**6. Write a conversion script**

Next, you should write a conversion script that lets you convert the checkpoint you used to debug *brand_new_bert* in the original repository to a checkpoint compatible with your just created 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch, but rather to look through the already existing conversion scripts in 🤗 Transformers for one that has been used to convert a similar model that was written in the same framework as *brand_new_bert*. Usually, it is enough to copy an already existing conversion script and slightly adapt it for your use case. Don't hesitate to ask the Hugging Face team to point you to a similar, already existing conversion script for your model.

- If you are porting a model from TensorFlow to PyTorch, a good starting point is BERT's conversion script [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)
- If you are porting a model from PyTorch to PyTorch, a good starting point is BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)

In the following, we'll quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel`, as follows:

```python
from torch import nn


class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(10, 10)
        self.intermediate = nn.Linear(10, 10)
        self.layer_norm = nn.LayerNorm(10)
```

Now we can create an instance of this model definition, which will fill all weights: `dense`, `intermediate`, `layer_norm` with random weights. We can print the model to see its architecture

```python
model = SimpleModel()

print(model)
```

This will print out the following:

```
SimpleModel(
  (dense): Linear(in_features=10, out_features=10, bias=True)
  (intermediate): Linear(in_features=10, out_features=10, bias=True)
  (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True)
)
```

We can see that the layer names are defined by the name of the class attribute in PyTorch.
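The same naming convention can also be inspected programmatically, which is what a conversion script ultimately has to match against. A small sketch building on the `SimpleModel` defined above:

```python
# The state dict / parameter names follow the attribute names chosen in SimpleModel.
for name, parameter in model.named_parameters():
    print(name, tuple(parameter.shape))

# Expected output (shapes shown as tuples):
# dense.weight (10, 10)
# dense.bias (10,)
# intermediate.weight (10, 10)
# intermediate.bias (10,)
# layer_norm.weight (10,)
# layer_norm.bias (10,)
```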
You can print out the weight values of a specific layer:

```python
print(model.dense.weight.data)
```

to see that the weights were randomly initialized

```
tensor([[-0.0818,  0.2207, -0.0749, -0.0030,  0.0045, -0.1569, -0.1598,  0.0212,
         -0.2077,  0.2157],
        [ 0.1044,  0.0201,  0.0990,  0.2482,  0.3116,  0.2509,  0.2866, -0.2190,
          0.2166, -0.0212],
        [-0.2000,  0.1107, -0.1999, -0.3119,  0.1559,  0.0993,  0.1776, -0.1950,
         -0.1023, -0.0447],
        [-0.0888, -0.1092,  0.2281,  0.0336,  0.1817, -0.0115,  0.2096,  0.1415,
         -0.1876, -0.2467],
        [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465,
          0.2577,  0.0402],
        [ 0.1502,  0.2465,  0.2566,  0.0693,  0.2352, -0.0530,  0.1859, -0.0604,
          0.2132,  0.1680],
        [ 0.1733, -0.2407, -0.1721,  0.1484,  0.0358, -0.0633, -0.0721, -0.0090,
          0.2707, -0.2509],
        [-0.1173,  0.1561,  0.2945,  0.0595, -0.1996,  0.2988, -0.0802,  0.0407,
          0.1829, -0.1568],
        [-0.1164, -0.2228, -0.0403,  0.0428,  0.1339,  0.0047,  0.1967,  0.2923,
          0.0333, -0.0536],
        [-0.1492, -0.1616,  0.1057,  0.1950, -0.2807, -0.2710, -0.1586,  0.0739,
          0.2220,  0.2358]])
```

In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.*

```python
# retrieve matching layer weights, e.g. by
# recursive algorithm
layer_name = "dense"
pretrained_weight = array_of_dense_layer

model_pointer = getattr(model, "dense")

model_pointer.weight.data = torch.from_numpy(pretrained_weight)
```

While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add assert statements for the shapes and to print out the names of the checkpoint weights. You should add statements like:

```python
assert (
    model_pointer.weight.shape == pretrained_weight.shape
), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"
```

Besides that, you should also print out the names of both weights to make sure they match, *e.g.*

```python
logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")
```

If either the shape or the name doesn't match, you probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation.

An incorrect shape is most likely due to an incorrect setting of the config parameters in `BrandNewBertConfig()` that do not exactly match those used for the checkpoint you want to convert. However, it could also be that PyTorch's implementation of a layer requires the weight to be transposed beforehand.

Finally, you should also check that **all** required weights are initialized and print out all checkpoint weights that were not used for initialization to make sure the model is correctly converted. It is completely normal that conversion attempts fail with either a wrong shape statement or a wrong name assignment.
This is most likely because you either used incorrect parameters in `BrandNewBertConfig()`, have a wrong architecture in the 🤗 Transformers implementation, have a bug in the `init()` functions of one of the components of the 🤗 Transformers implementation, or you need to transpose one of the checkpoint weights.

This step should be iterated with the previous step until all weights of the checkpoint are correctly loaded into the Transformers model. Having correctly loaded the checkpoint into the 🤗 Transformers implementation, you can then save the model to a folder of your choice `/path/to/converted/checkpoint/folder`, which should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```

**7. Implement the forward pass**

Having managed to correctly load the pretrained weights into the 🤗 Transformers implementation, you should now make sure that the forward pass is correctly implemented. In [Get familiar with the original repository](#3-4-run-a-pretrained-checkpoint-using-the-original-repository), you have already created a script that runs a forward pass of the model using the original repository. Now you should write an analogous script that uses the 🤗 Transformers implementation instead of the original one. It should look as follows:

```python
model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]
output = model(input_ids).last_hidden_states
```

It is very likely that the 🤗 Transformers implementation and the original model implementation don't give the exact same output the very first time, or that the forward pass throws an error. Don't be disappointed - it's expected! First, you should make sure that the forward pass doesn't throw any errors. It often happens that the wrong dimensions are used, leading to a *Dimensionality mismatch* error, or that the wrong data type is used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help if you cannot solve certain errors.

The final part of making sure the 🤗 Transformers implementation works correctly is to ensure that the outputs are equivalent to a precision of `1e-3`. First, you should ensure that the output shapes are identical, *i.e.* *outputs.shape* should yield the same value for the script of the 🤗 Transformers implementation and the original implementation. Next, you should make sure that the output values are identical as well. This is one of the most difficult parts of adding a new model. Common mistakes why the outputs are not identical are:

- Some layers were not added, *i.e.* an *activation* layer was not added, or the residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass.
To fix this, make sure *model.training is False* and that no dropout layer is falsely activated during the forward pass, *i.e.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)

The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side-by-side and check whether there are any differences. Ideally, you should debug/print out the intermediate outputs of both implementations of the forward pass to find the exact position in the network where the 🤗 Transformers implementation shows a different output than the original implementation. First, make sure that the hardcoded `input_ids` in both scripts are identical. Next, verify that the outputs of the first transformation of the `input_ids` (usually the word embeddings) are identical. And then work your way up to the very last layer of the network. At some point, you will notice a difference between the two implementations, which should point you to the bug in the 🤗 Transformers implementation.

From our experience, a simple and efficient way is to add many print statements in both the original implementation and the 🤗 Transformers implementation, at the same positions in the network respectively, and to successively remove print statements showing the same values for the intermediate representations.

When you're confident that both implementations yield the same output, verify the outputs with `torch.allclose(original_output, output, atol=1e-3)` - then the most difficult part is behind you! Congratulations - the work left to be done should be a cakewalk 😊.

**8. Adding all necessary model tests**

At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under the same `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass:

```bash
pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py
```

Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that

- a) the community can easily understand your work by looking at specific tests of *brand_new_bert*
- b) future changes to your model will not break any important features of the model.

At first, integration tests should be added. Those integration tests essentially do the same as the debugging scripts you used earlier to implement the model in 🤗 Transformers. A template of those model tests has already been added by the Cookiecutter, called `BrandNewBertModelIntegrationTests`, and only has to be filled out by you.
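As a rough sketch of what such a filled-out integration test might look like - the checkpoint name and the expected values are placeholders, not real numbers, and should be taken from the verified original implementation:

```python
import unittest

import torch

from transformers import BrandNewBertModel
from transformers.testing_utils import require_torch, slow, torch_device


@require_torch
class BrandNewBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        # The checkpoint name is a placeholder; use the checkpoint you converted and uploaded.
        model = BrandNewBertModel.from_pretrained("username/brand_new_bert").to(torch_device)
        input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]], device=torch_device)

        with torch.no_grad():
            output = model(input_ids).last_hidden_state

        expected_shape = torch.Size((1, 9, model.config.hidden_size))
        self.assertEqual(output.shape, expected_shape)

        # Placeholder values; copy the real ones from the original implementation's output.
        expected_slice = torch.tensor(
            [[[-0.1465, -0.6501, 0.1993], [-0.4417, -0.5920, 0.3450], [-0.5009, -0.7122, 0.4548]]],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
```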
To ensure that those tests pass, run

```bash
RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
```

<Tip>

In case you are using Windows, you should replace `RUN_SLOW=1` with `SET RUN_SLOW=1`.

</Tip>

Second, all features that are special to *brand_new_bert* should additionally be tested in a separate test under `BrandNewBertModelTester`/`BrandNewBertModelTest`. This part is often forgotten but is extremely useful in two ways:

- It helps to transfer the knowledge you acquired during the model addition to the community by showing how the special features of *brand_new_bert* should work.
- Future contributors can quickly test changes to the model by running those special tests.

**9. Implement the tokenizer**

Next, we should add the tokenizer of *brand_new_bert*. Usually, the tokenizer is equivalent to or very similar to an already existing tokenizer of 🤗 Transformers.

It is very important to find/extract the original tokenizer file and to manage to load this file into the 🤗 Transformers implementation of the tokenizer.

To ensure that the tokenizer works correctly, it is recommended to first create a script in the original repository that inputs a string and returns the `input_ids`. It could look similar to this (in pseudocode):

```python
input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = model.tokenize(input_str)
```

You might have to take another deep look into the original repository to find the correct tokenizer function, or you might even have to make changes to your clone of the original repository to only output the `input_ids`. Having written a functional tokenization script that uses the original repository, an analogous script for 🤗 Transformers should be created. It should look similar to this:

```python
from transformers import BrandNewBertTokenizer

input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."

tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")

input_ids = tokenizer(input_str).input_ids
```

When both `input_ids` yield the same values, as a final step a tokenizer test file should also be added. Analogous to the modeling test files of *brand_new_bert*, the tokenization test files of *brand_new_bert* should contain a couple of hard-coded integration tests.

**10. Run end-to-end integration tests**

Having added the tokenizer, you should also add a few end-to-end integration tests that use both the model and the tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful text-to-text example that the 🤗 Transformers implementation works as expected. A meaningful text-to-text example can be, *e.g.*, a source-to-target translation pair, an article-to-summary pair, a question-to-answer pair, etc.
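A sketch of what such an end-to-end check might look like for a summarization-style checkpoint is shown below; the checkpoint name and the expected summary string are placeholders, and `BrandNewBertForConditionalGeneration` is a hypothetical head class:

```python
import torch

from transformers import BrandNewBertForConditionalGeneration, BrandNewBertTokenizer

# Both names below are placeholders for the checkpoint you actually ported.
tokenizer = BrandNewBertTokenizer.from_pretrained("username/brand_new_bert-summarization")
model = BrandNewBertForConditionalGeneration.from_pretrained("username/brand_new_bert-summarization")

article = "A long news article that the checkpoint was fine-tuned to summarize ..."
inputs = tokenizer(article, return_tensors="pt")

with torch.no_grad():
    summary_ids = model.generate(**inputs, max_new_tokens=50)

summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)

# The expected string is a placeholder; it should be taken from the original implementation's output.
assert summary == "Expected summary produced by the original implementation."
```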
If none of the ported checkpoints has been fine-tuned on a downstream task, it is enough to simply rely on the model tests.

In a final step to ensure that the model is fully functional, you should also run all tests on GPU. It can happen that you forgot to add some `.to(self.device)` statements to internal tensors of the model, which in such a test would show up as an error. In case you have no access to a GPU, the Hugging Face team can take care of running those tests for you.

**11. Add Docstring**

Now, all the necessary functionality for *brand_new_bert* is added - you're almost done! The only thing left to add is a nice docstring and a doc page. The Cookiecutter should have added a template file called `docs/source/model_doc/brand_new_bert.md` that you should fill out. Users of your model will usually look at this page first before using your model. Hence, the documentation must be understandable and concise. It is very useful for the community to add some *Tips* to show how the model should be used. Don't hesitate to ping the Hugging Face team regarding the docstrings.

Next, make sure that the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and includes all necessary inputs and outputs. We have a detailed guide about writing documentation and our docstring format [here](writing-documentation). It is always good to remind oneself that documentation should be treated at least as carefully as the code in 🤗 Transformers, since the documentation is usually the community's first point of contact with the model.

**Code refactor**

Great, now you have added all the necessary code for *brand_new_bert*. At this point, you should correct some potential incorrect code style by running:

```bash
make style
```

and verify that your coding style passes the quality check:

```bash
make quality
```

There are a couple of other very strict design tests in 🤗 Transformers that might still be failing, which shows up in the tests of your pull request. This is often because of some missing information in the docstring or some incorrect naming. The Hugging Face team will surely help you if you're stuck here.

Lastly, it is always a good idea to refactor one's code after having ensured that the code works correctly. With all tests passing, now it's a good time to go over the added code again and do some refactoring.

You have now finished the coding part, congratulations! 🎉 You are awesome! 😎

**12. Upload the models to the model hub**

In this final part, you should convert and upload all checkpoints to the model hub and add a model card for each uploaded model checkpoint. You can get familiar with the hub functionalities by reading our [Model sharing and uploading Page](model_sharing).
Here you should work alongside the Hugging Face team to decide on a fitting name for each checkpoint and to get the required access rights to be able to upload the model under the organization of the author of *brand_new_bert*. The `push_to_hub` method, present in all models in `transformers`, is a quick and efficient way to push your checkpoint to the Hub. A little snippet is pasted below:

```python
brand_new_bert.push_to_hub("brand_new_bert")
# Uncomment the following line to push to an organization.
# brand_new_bert.push_to_hub("<organization>/brand_new_bert")
```

It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the specific characteristics of that particular checkpoint, *e.g.*, on which dataset was the checkpoint pretrained/fine-tuned? For which downstream task should the model be used? And also include some code on how to correctly use the model.

**13. (Optional) Add notebook**

It is very helpful to add a notebook that showcases in detail how *brand_new_bert* can be used for inference and/or fine-tuned on a downstream task. This is not mandatory to merge your PR, but very useful for the community.

**14. Submit your finished PR**

You're done programming now and can move on to the last step, which is getting your PR merged into main. Usually, the Hugging Face team will have helped you already at this point, but it is worth taking some time to give your finished PR a nice description and eventually add comments to your code if you want to point out certain design choices to your reviewer.

### Share your work!!

Now, it's time to get some credit from the community for your work! Completing a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pretrained models will certainly be used by hundreds and possibly even thousands of developers and researchers. You should be proud of your work and share your achievement with the community.

**You have made another model that is super easy to access for everyone in the community! 🤯**
transformers/docs/source/de/add_new_model.md/0
{ "file_path": "transformers/docs/source/de/add_new_model.md", "repo_id": "transformers", "token_count": 24185 }
234
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Contribute to 🤗 Transformers Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable. It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you. However you choose to contribute, please be mindful and respect our [code of conduct](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md). **This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md).** ## Ways to contribute There are several ways you can contribute to 🤗 Transformers: * Fix outstanding issues with the existing code. * Submit issues related to bugs or desired new features. * Implement new models. * Contribute to the examples or to the documentation. If you don't know where to start, there is a special [Good First Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of open issues that are beginner-friendly and help you start contributing to open-source. The best way to do that is to open a Pull Request and link it to the issue that you'd like to work on. We try to give priority to opened PRs as we can easily track the progress of the fix, and if the contributor does not have time anymore, someone else can take the PR over. For something slightly more challenging, you can also take a look at the [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) list. In general though, if you feel like you know what you're doing, go for it and we'll help you get there! 🚀 > All contributions are equally valuable to the community. 🥰 ## Fixing outstanding issues If you notice an issue with the existing code and have a fix in mind, feel free to [start contributing](#create-a-pull-request) and open a Pull Request! ## Submitting a bug-related issue or feature request Do your best to follow these guidelines when submitting a bug-related issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback. ### Did you find a bug? The 🤗 Transformers library is robust and reliable thanks to users who report the problems they encounter. Before you report an issue, we would really appreciate it if you could **make sure the bug was not already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions. 
Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it: * Your **OS type and version** and **Python**, **PyTorch** and **TensorFlow** versions when applicable. * A short, self-contained, code snippet that allows us to reproduce the bug in less than 30s. * The *full* traceback if an exception is raised. * Attach any other additional information, like screenshots, you think may help. To get the OS and software versions automatically, run the following command: ```bash transformers-cli env ``` You can also run the same command from the root of the repository: ```bash python src/transformers/commands/transformers_cli.py env ``` ### Do you want a new feature? If there is a new feature you'd like to see in 🤗 Transformers, please open an issue and describe: 1. What is the *motivation* behind this feature? Is it related to a problem or frustration with the library? Is it a feature related to something you need for a project? Is it something you worked on and think it could benefit the community? Whatever it is, we'd love to hear about it! 2. Describe your requested feature in as much detail as possible. The more you can tell us about it, the better we'll be able to help you. 3. Provide a *code snippet* that demonstrates the features usage. 4. If the feature is related to a paper, please include a link. If your issue is well written we're already 80% of the way there by the time you create it. We have added [templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with your issue. ## Do you want to implement a new model? New models are constantly released and if you want to implement a new model, please provide the following information: * A short description of the model and a link to the paper. * Link to the implementation if it is open-sourced. * Link to the model weights if they are available. If you are willing to contribute the model yourself, let us know so we can help you add it to 🤗 Transformers! We have added a [detailed guide and templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with adding a new model, and we also have a more technical guide for [how to add a model to 🤗 Transformers](https://huggingface.co/docs/transformers/add_new_model). ## Do you want to add documentation? We're always looking for improvements to the documentation that make it more clear and accurate. Please let us know how the documentation can be improved such as typos and any content that is missing, unclear or inaccurate. We'll be happy to make the changes or help you make a contribution if you're interested! For more details about how to generate, build, and write the documentation, take a look at the documentation [README](https://github.com/huggingface/transformers/tree/main/docs). ## Create a Pull Request Before writing any code, we strongly advise you to search through the existing PRs or issues to make sure nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback. You will need basic `git` proficiency to contribute to 🤗 Transformers. While `git` is not the easiest tool to use, it has the greatest manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference. 
You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing: 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the **[Fork](https://github.com/huggingface/transformers/fork)** button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote: ```bash git clone git@github.com:<your Github handle>/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Create a new branch to hold your development changes: ```bash git checkout -b a-descriptive-name-for-my-changes ``` 🚨 **Do not** work on the `main` branch! 4. Set up a development environment by running the following command in a virtual environment: ```bash pip install -e ".[dev]" ``` If 🤗 Transformers was already installed in the virtual environment, remove it with `pip uninstall transformers` before reinstalling it in editable mode with the `-e` flag. Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a failure with this command. If that's the case make sure to install the Deep Learning framework you are working with (PyTorch, TensorFlow and/or Flax) then do: ```bash pip install -e ".[quality]" ``` which should be enough for most use cases. 5. Develop the features in your branch. As you work on your code, you should make sure the test suite passes. Run the tests impacted by your changes like this: ```bash pytest tests/<TEST_TO_RUN>.py ``` For more information about tests, check out the [Testing](https://huggingface.co/docs/transformers/testing) guide. 🤗 Transformers relies on `black` and `ruff` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go with: ```bash make fixup ``` This target is also optimized to only work with files modified by the PR you're working on. If you prefer to run the checks one after the other, the following command applies the style corrections: ```bash make style ``` 🤗 Transformers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality controls are run by the CI, but you can run the same checks with: ```bash make quality ``` Finally, we have a lot of scripts to make sure we don't forget to update some files when adding a new model. You can run these scripts with: ```bash make repo-consistency ``` To learn more about those checks and how to fix any issues with them, check out the [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide. If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check make sure you install the documentation builder: ```bash pip install ".[docs]" ``` Run the following command from the root of the repository: ```bash doc-builder build transformers docs/source/en --build_dir ~/tmp/test-build ``` This will build the documentation in the `~/tmp/test-build` folder where you can inspect the generated Markdown files with your favorite editor. You can also preview the docs on GitHub when you open a pull request. 
Once you're happy with your changes, add the changed files with `git add` and record your changes locally with `git commit`: ```bash git add modified_file.py git commit ``` Please remember to write [good commit messages](https://chris.beams.io/posts/git-commit/) to clearly communicate the changes you made! To keep your copy of the code up to date with the original repository, rebase your branch on `upstream/main` *before* you open a pull request or if requested by a maintainer: ```bash git fetch upstream git rebase upstream/main ``` Push your changes to your branch: ```bash git push -u origin a-descriptive-name-for-my-changes ``` If you've already opened a pull request, you'll need to force push with the `--force` flag. Otherwise, if the pull request hasn't been opened yet, you can just push your changes normally. 6. Now you can go to your fork of the repository on GitHub and click on **Pull Request** to open a pull request. Make sure you tick off all the boxes on our [checklist](#pull-request-checklist) below. When you're ready, you can send your changes to the project maintainers for review. 7. It's ok if maintainers request changes, it happens to our core contributors too! So that everyone can see the changes in the pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request. ### Pull request checklist ☐ The pull request title should summarize your contribution.<br> ☐ If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people viewing the issue know you are working on it).<br> ☐ To indicate a work in progress, please prefix the title with `[WIP]`. These are useful to avoid duplicated work, and to differentiate them from PRs ready to be merged.<br> ☐ Make sure existing tests pass.<br> ☐ If adding a new feature, also add tests for it.<br> - If you are adding a new model, make sure you use `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)` to trigger the common tests. - If you are adding new `@slow` tests, make sure they pass using `RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py`. - If you are adding a new tokenizer, write tests and make sure `RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py` passes. - CircleCI does not run the slow tests, but GitHub Actions does every night!<br> ☐ All public methods must have informative docstrings (see [`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py) for an example).<br> ☐ Due to the rapidly growing repository, don't add any images, videos, or other non-text files that'll significantly weigh down the repository. Instead, use a Hub repository such as [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) to host these files and reference them by URL. We recommend placing documentation-related images in the following repository: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). You can open a PR on this dataset repository and ask a Hugging Face member to merge it. For more information about the checks run on a pull request, take a look at our [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide. ### Tests An extensive test suite is included to test the library behavior and several examples.
Library tests can be found in the [tests](https://github.com/huggingface/transformers/tree/main/tests) folder and examples tests in the [examples](https://github.com/huggingface/transformers/tree/main/examples) folder. We like `pytest` and `pytest-xdist` because they're faster. From the root of the repository, specify a *path to a subfolder or a test file* to run the test: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model ``` Similarly, for the `examples` directory, specify a *path to a subfolder or test file* to run the test. For example, the following command tests the text classification subfolder in the PyTorch `examples` directory: ```bash pip install -r examples/xxx/requirements.txt # only needed the first time python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification ``` In fact, this is actually how our `make test` and `make test-examples` commands are implemented (not including the `pip install`)! You can also specify a smaller set of tests in order to test only the feature you're working on. By default, slow tests are skipped but you can set the `RUN_SLOW` environment variable to `yes` to run them. This will download many gigabytes of models, so make sure you have enough disk space, a good internet connection, or a lot of patience! <Tip warning={true}> Remember to specify a *path to a subfolder or a test file* to run the test. Otherwise, you'll run all the tests in the `tests` or `examples` folder, which will take a very long time! </Tip> ```bash RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification ``` Like the slow tests, there are other environment variables available which are not enabled by default during testing: - `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers. - `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration. - `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration. More environment variables and additional information can be found in [testing_utils.py](src/transformers/testing_utils.py). 🤗 Transformers uses `pytest` as a test runner only. It doesn't use any `pytest`-specific features in the test suite itself. This means `unittest` is fully supported. Here's how to run tests with `unittest`: ```bash python -m unittest discover -s tests -t . -v python -m unittest discover -s examples -t examples -v ``` ### Style guide For documentation strings, 🤗 Transformers follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html). Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification) for more information. ### Develop on Windows On Windows (unless you're working in [Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/) or WSL), you need to configure git to transform Windows `CRLF` line endings to Linux `LF` line endings: ```bash git config core.autocrlf input ``` One way to run the `make` command on Windows is with MSYS2: 1. [Download MSYS2](https://www.msys2.org/), and we assume it's installed in `C:\msys64`. 2. Open the command line `C:\msys64\msys2.exe` (it should be available from the **Start** menu). 3. Run in the shell: `pacman -Syu` and install `make` with `pacman -S make`. 4. Add `C:\msys64\usr\bin` to your PATH environment variable.
You can now use `make` from any terminal (PowerShell, cmd.exe, etc.)! 🎉 ### Sync a forked repository with upstream main (the Hugging Face repository) When updating the main branch of a forked repository, please follow these steps to avoid pinging the upstream repository which adds reference notes to each upstream PR, and sends unnecessary notifications to the developers involved in these PRs. 1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main. 2. If a PR is absolutely necessary, use the following steps after checking out your branch: ```bash git checkout -b your-branch-for-syncing git pull --squash --no-commit upstream main git commit -m '<your message without GitHub references>' git push --set-upstream origin your-branch-for-syncing ```
transformers/docs/source/en/contributing.md/0
{ "file_path": "transformers/docs/source/en/contributing.md", "repo_id": "transformers", "token_count": 5137 }
235
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Utilities for Generation This page lists all the utility functions used by [`~generation.GenerationMixin.generate`]. ## Generate Outputs The output of [`~generation.GenerationMixin.generate`] is an instance of a subclass of [`~utils.ModelOutput`]. This output is a data structure containing all the information returned by [`~generation.GenerationMixin.generate`], but it can also be used as a tuple or a dictionary. Here's an example: ```python from transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2") inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt") generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True) ``` The `generation_output` object is a [`~generation.GenerateDecoderOnlyOutput`]. As we can see in the documentation of that class below, it has the following attributes: - `sequences`: the generated sequences of tokens - `scores` (optional): the prediction scores of the language modeling head, for each generation step - `hidden_states` (optional): the hidden states of the model, for each generation step - `attentions` (optional): the attention weights of the model, for each generation step Here we have the `scores` since we passed along `output_scores=True`, but we don't have `hidden_states` and `attentions` because we didn't pass `output_hidden_states=True` or `output_attentions=True`. You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you will get `None`. Here, for instance, `generation_output.scores` are all the generated prediction scores of the language modeling head, and `generation_output.attentions` is `None`. When using our `generation_output` object as a tuple, it only keeps the attributes that don't have `None` values. Here, for instance, it has two elements, `sequences` then `scores`, so ```python generation_output[:2] ``` will return the tuple `(generation_output.sequences, generation_output.scores)`. When using our `generation_output` object as a dictionary, it only keeps the attributes that don't have `None` values. Here, for instance, it has two keys that are `sequences` and `scores`. We document here all output types.
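To make this concrete, here is a minimal sketch of inspecting the `generation_output` object created above; it only relies on the attribute, tuple, and dictionary access described in the previous paragraphs:

```python
# `scores` was requested via `output_scores=True`, so it is populated;
# `hidden_states` and `attentions` were not requested, so they are `None`
sequences = generation_output.sequences
print(generation_output.hidden_states is None)  # True

# when used as a tuple, only the attributes that are not `None` are kept
sequences, scores = generation_output[:2]

# when used as a dictionary, only the non-`None` attributes appear as keys
print("scores" in generation_output)  # True
```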
### PyTorch [[autodoc]] generation.GenerateDecoderOnlyOutput [[autodoc]] generation.GenerateEncoderDecoderOutput [[autodoc]] generation.GenerateBeamDecoderOnlyOutput [[autodoc]] generation.GenerateBeamEncoderDecoderOutput ### TensorFlow [[autodoc]] generation.TFGreedySearchEncoderDecoderOutput [[autodoc]] generation.TFGreedySearchDecoderOnlyOutput [[autodoc]] generation.TFSampleEncoderDecoderOutput [[autodoc]] generation.TFSampleDecoderOnlyOutput [[autodoc]] generation.TFBeamSearchEncoderDecoderOutput [[autodoc]] generation.TFBeamSearchDecoderOnlyOutput [[autodoc]] generation.TFBeamSampleEncoderDecoderOutput [[autodoc]] generation.TFBeamSampleDecoderOnlyOutput [[autodoc]] generation.TFContrastiveSearchEncoderDecoderOutput [[autodoc]] generation.TFContrastiveSearchDecoderOnlyOutput ### FLAX [[autodoc]] generation.FlaxSampleOutput [[autodoc]] generation.FlaxGreedySearchOutput [[autodoc]] generation.FlaxBeamSearchOutput ## LogitsProcessor A [`LogitsProcessor`] can be used to modify the prediction scores of a language model head for generation. ### PyTorch [[autodoc]] AlternatingCodebooksLogitsProcessor - __call__ [[autodoc]] ClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] EncoderNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] EncoderRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] EpsilonLogitsWarper - __call__ [[autodoc]] EtaLogitsWarper - __call__ [[autodoc]] ExponentialDecayLengthPenalty - __call__ [[autodoc]] ForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] ForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] ForceTokensLogitsProcessor - __call__ [[autodoc]] HammingDiversityLogitsProcessor - __call__ [[autodoc]] InfNanRemoveLogitsProcessor - __call__ [[autodoc]] LogitNormalization - __call__ [[autodoc]] LogitsProcessor - __call__ [[autodoc]] LogitsProcessorList - __call__ [[autodoc]] LogitsWarper - __call__ [[autodoc]] MinLengthLogitsProcessor - __call__ [[autodoc]] MinNewTokensLengthLogitsProcessor - __call__ [[autodoc]] NoBadWordsLogitsProcessor - __call__ [[autodoc]] NoRepeatNGramLogitsProcessor - __call__ [[autodoc]] PrefixConstrainedLogitsProcessor - __call__ [[autodoc]] RepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] SequenceBiasLogitsProcessor - __call__ [[autodoc]] SuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] SuppressTokensLogitsProcessor - __call__ [[autodoc]] TemperatureLogitsWarper - __call__ [[autodoc]] TopKLogitsWarper - __call__ [[autodoc]] TopPLogitsWarper - __call__ [[autodoc]] TypicalLogitsWarper - __call__ [[autodoc]] UnbatchedClassifierFreeGuidanceLogitsProcessor - __call__ [[autodoc]] WhisperTimeStampLogitsProcessor - __call__ ### TensorFlow [[autodoc]] TFForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] TFForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] TFForceTokensLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessor - __call__ [[autodoc]] TFLogitsProcessorList - __call__ [[autodoc]] TFLogitsWarper - __call__ [[autodoc]] TFMinLengthLogitsProcessor - __call__ [[autodoc]] TFNoBadWordsLogitsProcessor - __call__ [[autodoc]] TFNoRepeatNGramLogitsProcessor - __call__ [[autodoc]] TFRepetitionPenaltyLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] TFSuppressTokensLogitsProcessor - __call__ [[autodoc]] TFTemperatureLogitsWarper - __call__ [[autodoc]] TFTopKLogitsWarper - __call__ [[autodoc]] TFTopPLogitsWarper - __call__ ### FLAX [[autodoc]] FlaxForcedBOSTokenLogitsProcessor - __call__ [[autodoc]] FlaxForcedEOSTokenLogitsProcessor - __call__ [[autodoc]] 
FlaxForceTokensLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessor - __call__ [[autodoc]] FlaxLogitsProcessorList - __call__ [[autodoc]] FlaxLogitsWarper - __call__ [[autodoc]] FlaxMinLengthLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensAtBeginLogitsProcessor - __call__ [[autodoc]] FlaxSuppressTokensLogitsProcessor - __call__ [[autodoc]] FlaxTemperatureLogitsWarper - __call__ [[autodoc]] FlaxTopKLogitsWarper - __call__ [[autodoc]] FlaxTopPLogitsWarper - __call__ [[autodoc]] FlaxWhisperTimeStampLogitsProcessor - __call__ ## StoppingCriteria A [`StoppingCriteria`] can be used to change when to stop generation (other than EOS token). Please note that this is exclusively available to our PyTorch implementations. [[autodoc]] StoppingCriteria - __call__ [[autodoc]] StoppingCriteriaList - __call__ [[autodoc]] MaxLengthCriteria - __call__ [[autodoc]] MaxTimeCriteria - __call__ ## Constraints A [`Constraint`] can be used to force the generation to include specific tokens or sequences in the output. Please note that this is exclusively available to our PyTorch implementations. [[autodoc]] Constraint [[autodoc]] PhrasalConstraint [[autodoc]] DisjunctiveConstraint [[autodoc]] ConstraintListState ## BeamSearch [[autodoc]] BeamScorer - process - finalize [[autodoc]] BeamSearchScorer - process - finalize [[autodoc]] ConstrainedBeamSearchScorer - process - finalize ## Streamers [[autodoc]] TextStreamer [[autodoc]] TextIteratorStreamer ## Caches [[autodoc]] Cache - update [[autodoc]] DynamicCache - update - get_seq_length - reorder_cache - to_legacy_cache - from_legacy_cache [[autodoc]] SinkCache - update - get_seq_length - reorder_cache [[autodoc]] StaticCache - update - get_seq_length
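As a quick usage reference for the streamers documented above, here is a minimal sketch; the checkpoint name follows the GPT-2 example earlier on this page and is only an illustration:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")

# print tokens to stdout as soon as they are generated, skipping the prompt
streamer = TextStreamer(tokenizer, skip_prompt=True)
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
```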
transformers/docs/source/en/internal/generation_utils.md/0
{ "file_path": "transformers/docs/source/en/internal/generation_utils.md", "repo_id": "transformers", "token_count": 2989 }
236
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Image Processor An image processor is in charge of preparing input features for vision models and post-processing their outputs. This includes transformations such as resizing, normalization, and conversion to PyTorch, TensorFlow, Flax, and NumPy tensors. It may also include model-specific post-processing such as converting logits to segmentation masks. ## ImageProcessingMixin [[autodoc]] image_processing_utils.ImageProcessingMixin - from_pretrained - save_pretrained ## BatchFeature [[autodoc]] BatchFeature ## BaseImageProcessor [[autodoc]] image_processing_utils.BaseImageProcessor
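As an illustration of the role described above, here is a minimal sketch of preparing an image for a vision model; the checkpoint name is only an example:

```python
import requests
from PIL import Image
from transformers import AutoImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# the image processor resizes and normalizes the image and returns framework tensors
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # (batch_size, num_channels, height, width)
```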
transformers/docs/source/en/main_classes/image_processor.md/0
{ "file_path": "transformers/docs/source/en/main_classes/image_processor.md", "repo_id": "transformers", "token_count": 343 }
237
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Audio Spectrogram Transformer ## Overview The Audio Spectrogram Transformer model was proposed in [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass. The Audio Spectrogram Transformer applies a [Vision Transformer](vit) to audio, by turning audio into an image (spectrogram). The model obtains state-of-the-art results for audio classification. The abstract from the paper is the following: *In the past decade, convolutional neural networks (CNNs) have been widely adopted as the main building block for end-to-end audio classification models, which aim to learn a direct mapping from audio spectrograms to corresponding labels. To better capture long-range global context, a recent trend is to add a self-attention mechanism on top of the CNN, forming a CNN-attention hybrid model. However, it is unclear whether the reliance on a CNN is necessary, and if neural networks purely based on attention are sufficient to obtain good performance in audio classification. In this paper, we answer the question by introducing the Audio Spectrogram Transformer (AST), the first convolution-free, purely attention-based model for audio classification. We evaluate AST on various audio classification benchmarks, where it achieves new state-of-the-art results of 0.485 mAP on AudioSet, 95.6% accuracy on ESC-50, and 98.1% accuracy on Speech Commands V2.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/audio_spectogram_transformer_architecture.png" alt="drawing" width="600"/> <small> Audio Spectrogram Transformer architecture. Taken from the <a href="https://arxiv.org/abs/2104.01778">original paper</a>.</small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/YuanGongND/ast). ## Usage tips - When fine-tuning the Audio Spectrogram Transformer (AST) on your own dataset, it's recommended to take care of the input normalization (to make sure the input has mean of 0 and std of 0.5). [`ASTFeatureExtractor`] takes care of this. Note that it uses the AudioSet mean and std by default. You can check [`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py) to see how the authors compute the stats for a downstream dataset. - Note that the AST needs a low learning rate (the authors use a 10 times smaller learning rate compared to their CNN model proposed in the [PSLA paper](https://arxiv.org/abs/2102.01243)) and converges quickly, so please search for a suitable learning rate and learning rate scheduler for your task. 
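To illustrate the role of [`ASTFeatureExtractor`] described in the tips above, here is a minimal audio-classification sketch; the checkpoint name and the use of a dummy waveform are assumptions made for illustration:

```python
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

checkpoint = "MIT/ast-finetuned-audioset-10-10-0.4593"
feature_extractor = ASTFeatureExtractor.from_pretrained(checkpoint)
model = ASTForAudioClassification.from_pretrained(checkpoint)

# 1 second of silence at 16 kHz, as a stand-in for a real waveform
waveform = [0.0] * 16000
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

predicted_label = model.config.id2label[logits.argmax(-1).item()]
print(predicted_label)
```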
## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with the Audio Spectrogram Transformer. <PipelineTag pipeline="audio-classification"/> - A notebook illustrating inference with AST for audio classification can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/AST). - [`ASTForAudioClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb). - See also: [Audio classification](../tasks/audio_classification). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ASTConfig [[autodoc]] ASTConfig ## ASTFeatureExtractor [[autodoc]] ASTFeatureExtractor - __call__ ## ASTModel [[autodoc]] ASTModel - forward ## ASTForAudioClassification [[autodoc]] ASTForAudioClassification - forward
transformers/docs/source/en/model_doc/audio-spectrogram-transformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/audio-spectrogram-transformer.md", "repo_id": "transformers", "token_count": 1220 }
238
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Blenderbot Small Note that [`BlenderbotSmallModel`] and [`BlenderbotSmallForConditionalGeneration`] are only used in combination with the checkpoint [facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M). Larger Blenderbot checkpoints should instead be used with [`BlenderbotModel`] and [`BlenderbotForConditionalGeneration`]. ## Overview The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020. The abstract of the paper is the following: *Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that scaling neural models in the number of parameters and the size of the data they are trained on gives improved results, we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent persona. We show that large scale models can learn these skills when given appropriate training data and choice of generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing failure cases of our models.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be found [here](https://github.com/facebookresearch/ParlAI). ## Usage tips Blenderbot Small is a model with absolute position embeddings, so it's usually advised to pad the inputs on the right rather than the left.
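Below is a minimal conversation sketch with the 90M checkpoint mentioned at the top of this page; the generation settings are assumptions, and the exact reply will vary (on the Hub the checkpoint may also be published as `facebook/blenderbot_small-90M`):

```python
from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot-90M")

utterance = "My friends are cool but they eat too many carbs."
inputs = tokenizer([utterance], return_tensors="pt")

# generate a reply to the utterance
reply_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
```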
## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## BlenderbotSmallConfig [[autodoc]] BlenderbotSmallConfig ## BlenderbotSmallTokenizer [[autodoc]] BlenderbotSmallTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## BlenderbotSmallTokenizerFast [[autodoc]] BlenderbotSmallTokenizerFast <frameworkcontent> <pt> ## BlenderbotSmallModel [[autodoc]] BlenderbotSmallModel - forward ## BlenderbotSmallForConditionalGeneration [[autodoc]] BlenderbotSmallForConditionalGeneration - forward ## BlenderbotSmallForCausalLM [[autodoc]] BlenderbotSmallForCausalLM - forward </pt> <tf> ## TFBlenderbotSmallModel [[autodoc]] TFBlenderbotSmallModel - call ## TFBlenderbotSmallForConditionalGeneration [[autodoc]] TFBlenderbotSmallForConditionalGeneration - call </tf> <jax> ## FlaxBlenderbotSmallModel [[autodoc]] FlaxBlenderbotSmallModel - __call__ - encode - decode ## FlaxBlenderbotSmallForConditionalGeneration [[autodoc]] FlaxBlenderbotSmallForConditionalGeneration - __call__ - encode - decode </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/blenderbot-small.md/0
{ "file_path": "transformers/docs/source/en/model_doc/blenderbot-small.md", "repo_id": "transformers", "token_count": 1170 }
239
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Encoder Decoder Models ## Overview The [`EncoderDecoderModel`] can be used to initialize a sequence-to-sequence model with any pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder. The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. After such an [`EncoderDecoderModel`] has been trained/fine-tuned, it can be saved/loaded just like any other models (see the examples for more information). An application of this architecture could be to leverage two pretrained [`BertModel`] as the encoder and decoder for a summarization model as was shown in: [Text Summarization with Pretrained Encoders](https://arxiv.org/abs/1908.08345) by Yang Liu and Mirella Lapata. ## Randomly initializing `EncoderDecoderModel` from model configurations. [`EncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`BertModel`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder. ```python >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel >>> config_encoder = BertConfig() >>> config_decoder = BertConfig() >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> model = EncoderDecoderModel(config=config) ``` ## Initialising `EncoderDecoderModel` from a pretrained encoder and a pretrained decoder. [`EncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained auto-encoding model, *e.g.* BERT, can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder. Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized. Initializing [`EncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder). To do so, the `EncoderDecoderModel` class provides a [`EncoderDecoderModel.from_encoder_decoder_pretrained`] method. 
```python >>> from transformers import EncoderDecoderModel, BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased") ``` ## Loading an existing `EncoderDecoderModel` checkpoint and performing inference. To load fine-tuned checkpoints of the `EncoderDecoderModel` class, [`EncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers. To perform inference, one uses the [`generate`] method, which allows you to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search, and multinomial sampling. ```python >>> from transformers import AutoTokenizer, EncoderDecoderModel >>> # load a fine-tuned seq2seq model and corresponding tokenizer >>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail") >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail") >>> # let's perform inference on a long piece of text >>> ARTICLE_TO_SUMMARIZE = ( ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds " ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were " ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." ... ) >>> input_ids = tokenizer(ARTICLE_TO_SUMMARIZE, return_tensors="pt").input_ids >>> # autoregressively generate summary (uses greedy decoding by default) >>> generated_ids = model.generate(input_ids) >>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) nearly 800 thousand customers were affected by the shutoffs. the aim is to reduce the risk of wildfires. nearly 800, 000 customers were expected to be affected by high winds amid dry conditions. pg & e said it scheduled the blackouts to last through at least midday tomorrow. ``` ## Loading a PyTorch checkpoint into `TFEncoderDecoderModel`. [`TFEncoderDecoderModel.from_pretrained`] currently doesn't support initializing the model from a PyTorch checkpoint. Passing `from_pt=True` to this method will throw an exception. If there are only PyTorch checkpoints for a particular encoder-decoder model, a workaround is: ```python >>> # a workaround to load from pytorch checkpoint >>> from transformers import EncoderDecoderModel, TFEncoderDecoderModel >>> _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") >>> _model.encoder.save_pretrained("./encoder") >>> _model.decoder.save_pretrained("./decoder") >>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained( ... "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True ... ) >>> # This is only for copying some specific attributes of this particular model. >>> model.config = _model.config ``` ## Training Once the model is created, it can be fine-tuned similarly to BART, T5, or any other encoder-decoder model. As you can see, only 2 inputs are required for the model in order to compute a loss: `input_ids` (which are the `input_ids` of the encoded input sequence) and `labels` (which are the `input_ids` of the encoded target sequence).
```python >>> from transformers import BertTokenizer, EncoderDecoderModel >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("google-bert/bert-base-uncased", "google-bert/bert-base-uncased") >>> model.config.decoder_start_token_id = tokenizer.cls_token_id >>> model.config.pad_token_id = tokenizer.pad_token_id >>> input_ids = tokenizer( ... "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side.During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft).Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.", ... return_tensors="pt", ... ).input_ids >>> labels = tokenizer( ... "the eiffel tower surpassed the washington monument to become the tallest structure in the world. it was the first structure to reach a height of 300 metres in paris in 1930. it is now taller than the chrysler building by 5. 2 metres ( 17 ft ) and is the second tallest free - standing structure in paris.", ... return_tensors="pt", ... ).input_ids >>> # the forward function automatically creates the correct decoder_input_ids >>> loss = model(input_ids=input_ids, labels=labels).loss ``` Detailed [colab](https://colab.research.google.com/drive/1WIk2bxglElfZewOHboPFNj8H44_VAyKE?usp=sharing#scrollTo=ZwQIEhKOrJpl) for training. This model was contributed by [thomwolf](https://github.com/thomwolf). This model's TensorFlow and Flax versions were contributed by [ydshieh](https://github.com/ydshieh). ## EncoderDecoderConfig [[autodoc]] EncoderDecoderConfig <frameworkcontent> <pt> ## EncoderDecoderModel [[autodoc]] EncoderDecoderModel - forward - from_encoder_decoder_pretrained </pt> <tf> ## TFEncoderDecoderModel [[autodoc]] TFEncoderDecoderModel - call - from_encoder_decoder_pretrained </tf> <jax> ## FlaxEncoderDecoderModel [[autodoc]] FlaxEncoderDecoderModel - __call__ - from_encoder_decoder_pretrained </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/encoder-decoder.md/0
{ "file_path": "transformers/docs/source/en/model_doc/encoder-decoder.md", "repo_id": "transformers", "token_count": 2664 }
240
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ImageGPT ## Overview The ImageGPT model was proposed in [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like model trained to predict the next pixel value, allowing for both unconditional and conditional image generation. The abstract from the paper is the following: *Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. We are also competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0% top-1 accuracy on a linear probe of our features.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/imagegpt_architecture.png" alt="drawing" width="600"/> <small> Summary of the approach. Taken from the [original paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf). </small> This model was contributed by [nielsr](https://huggingface.co/nielsr), based on [this issue](https://github.com/openai/image-gpt/issues/7). The original code can be found [here](https://github.com/openai/image-gpt). ## Usage tips - ImageGPT is almost exactly the same as [GPT-2](gpt2), with the exception that a different activation function is used (namely "quick gelu"), and the layer normalization layers don't mean-center the inputs. ImageGPT also doesn't have tied input and output embeddings. - As the time and memory requirements of the attention mechanism of Transformers scale quadratically with the sequence length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a sequence of 32x32x3=3072 tokens with values from 0..255 into a Transformer is still prohibitively long. Therefore, the authors applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32*32 = 1024-long sequence, but now of integers in the range 0..511. So we are shrinking the sequence length at the cost of a bigger embedding matrix.
In other words, the vocabulary size of ImageGPT is 512, plus 1 for a special "start of sentence" (SOS) token, used at the beginning of every sequence. One can use [`ImageGPTImageProcessor`] to prepare images for the model. - Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly performant image features useful for downstream tasks, such as image classification. The authors showed that the features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as a scikit-learn logistic regression model, for example). This is also referred to as "linear probing". Features can be easily obtained by first forwarding the image through the model with `output_hidden_states=True`, and then average-pooling the hidden states at whatever layer you like (see the sketch at the end of this page). - Alternatively, one can further fine-tune the entire model on a downstream dataset, similarly to BERT. For this, you can use [`ImageGPTForImageClassification`]. - ImageGPT comes in different sizes: there's ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors also trained an XL variant, which they didn't release. The differences in size are summarized in the following table: | **Model variant** | **Layers** | **Hidden size** | **Params (M)** | |---|---|---|---| | ImageGPT-small | 24 | 512 | 76 | | ImageGPT-medium | 36 | 1024 | 455 | | ImageGPT-large | 48 | 1536 | 1400 | | ImageGPT-XL (not released) | 60 | 3072 | 6800 | ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ImageGPT. <PipelineTag pipeline="image-classification"/> - Demo notebooks for ImageGPT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ImageGPT). - [`ImageGPTForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ImageGPTConfig [[autodoc]] ImageGPTConfig ## ImageGPTFeatureExtractor [[autodoc]] ImageGPTFeatureExtractor - __call__ ## ImageGPTImageProcessor [[autodoc]] ImageGPTImageProcessor - preprocess ## ImageGPTModel [[autodoc]] ImageGPTModel - forward ## ImageGPTForCausalImageModeling [[autodoc]] ImageGPTForCausalImageModeling - forward ## ImageGPTForImageClassification [[autodoc]] ImageGPTForImageClassification - forward
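For reference, here is a minimal sketch of the "linear probing" feature-extraction recipe described in the usage tips above; the checkpoint name and the choice of a middle layer are illustrative assumptions:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, ImageGPTModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# the image processor maps pixels to the 512 color-cluster tokens expected by the model
image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
model = ImageGPTModel.from_pretrained("openai/imagegpt-small")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# average-pool the hidden states of a middle layer to get one feature vector per image
middle = len(outputs.hidden_states) // 2
features = outputs.hidden_states[middle].mean(dim=1)  # (batch_size, hidden_size)
# `features` can now be used to train e.g. a scikit-learn logistic regression classifier
```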
transformers/docs/source/en/model_doc/imagegpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/imagegpt.md", "repo_id": "transformers", "token_count": 1915 }
241
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LongT5 ## Overview The LongT5 model was proposed in [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung and Yinfei Yang. It's an encoder-decoder transformer pre-trained in a text-to-text denoising generative setting. LongT5 model is an extension of T5 model, and it enables using one of the two different efficient attention mechanisms - (1) Local attention, or (2) Transient-Global attention. The abstract from the paper is the following: *Recent work has shown that either (1) increasing the input length or (2) increasing model size can improve the performance of Transformer-based neural models. In this paper, we present a new model, called LongT5, with which we explore the effects of scaling both the input length and model size at the same time. Specifically, we integrated attention ideas from long-input transformers (ETC), and adopted pre-training strategies from summarization pre-training (PEGASUS) into the scalable T5 architecture. The result is a new attention mechanism we call {\em Transient Global} (TGlobal), which mimics ETC's local/global attention mechanism, but without requiring additional side-inputs. We are able to achieve state-of-the-art results on several summarization tasks and outperform the original T5 models on question answering tasks.* This model was contributed by [stancld](https://huggingface.co/stancld). The original code can be found [here](https://github.com/google-research/longt5). ## Usage tips - [`LongT5ForConditionalGeneration`] is an extension of [`T5ForConditionalGeneration`] exchanging the traditional encoder *self-attention* layer with efficient either *local* attention or *transient-global* (*tglobal*) attention. - Unlike the T5 model, LongT5 does not use a task prefix. Furthermore, it uses a different pre-training objective inspired by the pre-training of [`PegasusForConditionalGeneration`]. - LongT5 model is designed to work efficiently and very well on long-range *sequence-to-sequence* tasks where the input sequence exceeds commonly used 512 tokens. It is capable of handling input sequences of a length up to 16,384 tokens. - For *Local Attention*, the sparse sliding-window local attention operation allows a given token to attend only `r` tokens to the left and right of it (with `r=127` by default). *Local Attention* does not introduce any new parameters to the model. The complexity of the mechanism is linear in input sequence length `l`: `O(l*r)`. - *Transient Global Attention* is an extension of the *Local Attention*. It, furthermore, allows each input token to interact with all other tokens in the layer. 
This is achieved via splitting an input sequence into blocks of a fixed length `k` (with a default `k=16`). Then, a global token for such a block is obtained via summing and normalizing the embeddings of every token in the block. Thanks to this, the attention allows each token to attend to both nearby tokens like in Local attention, and also every global token like in the case of standard global attention (*transient* represents the fact that the global tokens are constructed dynamically within each attention operation). As a consequence, *TGlobal* attention introduces a few new parameters -- global relative position biases and a layer normalization for the global tokens' embeddings. The complexity of this mechanism is `O(l(r + l/k))`. - An example showing how to evaluate a fine-tuned LongT5 model on the [pubmed dataset](https://huggingface.co/datasets/scientific_papers) is below. ```python >>> import evaluate >>> from datasets import load_dataset >>> from transformers import AutoTokenizer, LongT5ForConditionalGeneration >>> dataset = load_dataset("scientific_papers", "pubmed", split="validation") >>> model = ( ... LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") ... .to("cuda") ... .half() ... ) >>> tokenizer = AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps") >>> def generate_answers(batch): ... inputs_dict = tokenizer( ... batch["article"], max_length=16384, padding="max_length", truncation=True, return_tensors="pt" ... ) ... input_ids = inputs_dict.input_ids.to("cuda") ... attention_mask = inputs_dict.attention_mask.to("cuda") ... output_ids = model.generate(input_ids, attention_mask=attention_mask, max_length=512, num_beams=2) ... batch["predicted_abstract"] = tokenizer.batch_decode(output_ids, skip_special_tokens=True) ... return batch >>> result = dataset.map(generate_answers, batched=True, batch_size=2) >>> rouge = evaluate.load("rouge") >>> rouge.compute(predictions=result["predicted_abstract"], references=result["abstract"]) ``` ## Resources - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## LongT5Config [[autodoc]] LongT5Config <frameworkcontent> <pt> ## LongT5Model [[autodoc]] LongT5Model - forward ## LongT5ForConditionalGeneration [[autodoc]] LongT5ForConditionalGeneration - forward ## LongT5EncoderModel [[autodoc]] LongT5EncoderModel - forward </pt> <jax> ## FlaxLongT5Model [[autodoc]] FlaxLongT5Model - __call__ - encode - decode ## FlaxLongT5ForConditionalGeneration [[autodoc]] FlaxLongT5ForConditionalGeneration - __call__ - encode - decode </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/longt5.md/0
{ "file_path": "transformers/docs/source/en/model_doc/longt5.md", "repo_id": "transformers", "token_count": 1797 }
242
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Neighborhood Attention Transformer ## Overview NAT was proposed in [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. It is a hierarchical vision transformer based on Neighborhood Attention, a sliding-window self attention pattern. The abstract from the paper is the following: *We present Neighborhood Attention (NA), the first efficient and scalable sliding-window attention mechanism for vision. NA is a pixel-wise operation, localizing self attention (SA) to the nearest neighboring pixels, and therefore enjoys a linear time and space complexity compared to the quadratic complexity of SA. The sliding-window pattern allows NA's receptive field to grow without needing extra pixel shifts, and preserves translational equivariance, unlike Swin Transformer's Window Self Attention (WSA). We develop NATTEN (Neighborhood Attention Extension), a Python package with efficient C++ and CUDA kernels, which allows NA to run up to 40% faster than Swin's WSA while using up to 25% less memory. We further present Neighborhood Attention Transformer (NAT), a new hierarchical transformer design based on NA that boosts image classification and downstream vision performance. Experimental results on NAT are competitive; NAT-Tiny reaches 83.2% top-1 accuracy on ImageNet, 51.4% mAP on MS-COCO and 48.4% mIoU on ADE20K, which is 1.9% ImageNet accuracy, 1.0% COCO mAP, and 2.6% ADE20K mIoU improvement over a Swin model with similar size. * <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/neighborhood-attention-pattern.jpg" alt="drawing" width="600"/> <small> Neighborhood Attention compared to other attention patterns. Taken from the <a href="https://arxiv.org/abs/2204.07143">original paper</a>.</small> This model was contributed by [Ali Hassani](https://huggingface.co/alihassanijr). The original code can be found [here](https://github.com/SHI-Labs/Neighborhood-Attention-Transformer). ## Usage tips - One can use the [`AutoImageProcessor`] API to prepare images for the model. - NAT can be used as a *backbone*. When `output_hidden_states = True`, it will output both `hidden_states` and `reshaped_hidden_states`. The `reshaped_hidden_states` have a shape of `(batch, num_channels, height, width)` rather than `(batch_size, height, width, num_channels)`. Notes: - NAT depends on [NATTEN](https://github.com/SHI-Labs/NATTEN/)'s implementation of Neighborhood Attention. You can install it with pre-built wheels for Linux by referring to [shi-labs.com/natten](https://shi-labs.com/natten), or build on your system by running `pip install natten`. Note that the latter will likely take time to compile. NATTEN does not support Windows devices yet. 
- Patch size of 4 is only supported at the moment. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with NAT. <PipelineTag pipeline="image-classification"/> - [`NatForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## NatConfig [[autodoc]] NatConfig ## NatModel [[autodoc]] NatModel - forward ## NatForImageClassification [[autodoc]] NatForImageClassification - forward
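As a quick reference for the classes documented above, here is a minimal inference sketch; it requires NATTEN to be installed as noted in the usage tips, and the checkpoint name is an assumption used for illustration:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, NatForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
model = NatForImageClassification.from_pretrained("shi-labs/nat-mini-in1k-224")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# print the ImageNet-1k class predicted for the image
print(model.config.id2label[logits.argmax(-1).item()])
```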
transformers/docs/source/en/model_doc/nat.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nat.md", "repo_id": "transformers", "token_count": 1229 }
243
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Perceiver ## Overview The Perceiver IO model was proposed in [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. Perceiver IO is a generalization of [Perceiver](https://arxiv.org/abs/2103.03206) to handle arbitrary outputs in addition to arbitrary inputs. The original Perceiver only produced a single classification label. In addition to classification labels, Perceiver IO can produce (for example) language, optical flow, and multimodal videos with audio. This is done using the same building blocks as the original Perceiver. The computational complexity of Perceiver IO is linear in the input and output size and the bulk of the processing occurs in the latent space, allowing us to process inputs and outputs that are much larger than can be handled by standard Transformers. This means, for example, Perceiver IO can do BERT-style masked language modeling directly using bytes instead of tokenized inputs. The abstract from the paper is the following: *The recently-proposed Perceiver model obtains good results on several domains (images, audio, multimodal, point clouds) while scaling linearly in compute and memory with the input size. While the Perceiver supports many kinds of inputs, it can only produce very simple outputs such as class scores. Perceiver IO overcomes this limitation without sacrificing the original's appealing properties by learning to flexibly query the model's latent space to produce outputs of arbitrary size and semantics. Perceiver IO still decouples model depth from data size and still scales linearly with data size, but now with respect to both input and output sizes. The full Perceiver IO model achieves strong results on tasks with highly structured output spaces, such as natural language and visual understanding, StarCraft II, and multi-task and multi-modal domains. As highlights, Perceiver IO matches a Transformer-based BERT baseline on the GLUE language benchmark without the need for input tokenization and achieves state-of-the-art performance on Sintel optical flow estimation.* Here's a TLDR explaining how Perceiver works: The main problem with the self-attention mechanism of the Transformer is that the time and memory requirements scale quadratically with the sequence length. Hence, models like BERT and RoBERTa are limited to a max sequence length of 512 tokens. Perceiver aims to solve this issue by, instead of performing self-attention on the inputs, perform it on a set of latent variables, and only use the inputs for cross-attention. 
In this way, the time and memory requirements don't depend on the length of the inputs anymore, as one uses a fixed amount of latent variables, like 256 or 512. These are randomly initialized, after which they are trained end-to-end using backpropagation. Internally, [`PerceiverModel`] will create the latents, which is a tensor of shape `(batch_size, num_latents, d_latents)`. One must provide `inputs` (which could be text, images, audio, you name it!) to the model, which it will use to perform cross-attention with the latents. The output of the Perceiver encoder is a tensor of the same shape. One can then, similar to BERT, convert the last hidden states of the latents to classification logits by averaging along the sequence dimension, and placing a linear layer on top of that to project the `d_latents` to `num_labels`. This was the idea of the original Perceiver paper. However, it could only output classification logits. In a follow-up work, PerceiverIO, they generalized it to let the model also produce outputs of arbitrary size. How, you might ask? The idea is actually relatively simple: one defines outputs of an arbitrary size, and then applies cross-attention with the last hidden states of the latents, using the outputs as queries, and the latents as keys and values. So let's say one wants to perform masked language modeling (BERT-style) with the Perceiver. As the Perceiver's input length will not have an impact on the computation time of the self-attention layers, one can provide raw bytes, providing `inputs` of length 2048 to the model. If one now masks out certain of these 2048 tokens, one can define the `outputs` as being of shape: `(batch_size, 2048, 768)`. Next, one performs cross-attention with the final hidden states of the latents to update the `outputs` tensor. After cross-attention, one still has a tensor of shape `(batch_size, 2048, 768)`. One can then place a regular language modeling head on top, to project the last dimension to the vocabulary size of the model, i.e. creating logits of shape `(batch_size, 2048, 262)` (as Perceiver uses a vocabulary size of 262 byte IDs). <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/perceiver_architecture.jpg" alt="drawing" width="600"/> <small> Perceiver IO architecture. Taken from the <a href="https://arxiv.org/abs/2105.15203">original paper</a> </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/deepmind/deepmind-research/tree/master/perceiver). <Tip warning={true}> Perceiver does **not** work with `torch.nn.DataParallel` due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035) </Tip> ## Resources - The quickest way to get started with the Perceiver is by checking the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Perceiver). - Refer to the [blog post](https://huggingface.co/blog/perceiver) if you want to fully understand how the model works and is implemented in the library. Note that the models available in the library only showcase some examples of what you can do with the Perceiver. There are many more use cases, including question answering, named-entity recognition, object detection, audio classification, video classification, etc. 
- [Text classification task guide](../tasks/sequence_classification) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Image classification task guide](../tasks/image_classification) ## Perceiver specific outputs [[autodoc]] models.perceiver.modeling_perceiver.PerceiverModelOutput [[autodoc]] models.perceiver.modeling_perceiver.PerceiverDecoderOutput [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMaskedLMOutput [[autodoc]] models.perceiver.modeling_perceiver.PerceiverClassifierOutput ## PerceiverConfig [[autodoc]] PerceiverConfig ## PerceiverTokenizer [[autodoc]] PerceiverTokenizer - __call__ ## PerceiverFeatureExtractor [[autodoc]] PerceiverFeatureExtractor - __call__ ## PerceiverImageProcessor [[autodoc]] PerceiverImageProcessor - preprocess ## PerceiverTextPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverTextPreprocessor ## PerceiverImagePreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverImagePreprocessor ## PerceiverOneHotPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverOneHotPreprocessor ## PerceiverAudioPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverAudioPreprocessor ## PerceiverMultimodalPreprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor ## PerceiverProjectionDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverProjectionDecoder ## PerceiverBasicDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverBasicDecoder ## PerceiverClassificationDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverClassificationDecoder ## PerceiverOpticalFlowDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder ## PerceiverBasicVideoAutoencodingDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverBasicVideoAutoencodingDecoder ## PerceiverMultimodalDecoder [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder ## PerceiverProjectionPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverProjectionPostprocessor ## PerceiverAudioPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverAudioPostprocessor ## PerceiverClassificationPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverClassificationPostprocessor ## PerceiverMultimodalPostprocessor [[autodoc]] models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor ## PerceiverModel [[autodoc]] PerceiverModel - forward ## PerceiverForMaskedLM [[autodoc]] PerceiverForMaskedLM - forward ## PerceiverForSequenceClassification [[autodoc]] PerceiverForSequenceClassification - forward ## PerceiverForImageClassificationLearned [[autodoc]] PerceiverForImageClassificationLearned - forward ## PerceiverForImageClassificationFourier [[autodoc]] PerceiverForImageClassificationFourier - forward ## PerceiverForImageClassificationConvProcessing [[autodoc]] PerceiverForImageClassificationConvProcessing - forward ## PerceiverForOpticalFlow [[autodoc]] PerceiverForOpticalFlow - forward ## PerceiverForMultimodalAutoencoding [[autodoc]] PerceiverForMultimodalAutoencoding - forward
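To make the byte-level masked language modeling described above concrete, here is a minimal sketch with [`PerceiverForMaskedLM`]. The `deepmind/language-perceiver` checkpoint and the exact byte positions that get masked are illustrative assumptions; adapt them to your own input.

```python
import torch
from transformers import PerceiverTokenizer, PerceiverForMaskedLM

tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")

text = "This is an incomplete sentence where some words are missing."
# The tokenizer operates on raw UTF-8 bytes and pads to the model's max length (2048)
encoding = tokenizer(text, padding="max_length", return_tensors="pt")

# Mask the bytes corresponding to " missing." (positions chosen for this particular string)
encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id

with torch.no_grad():
    outputs = model(**encoding)

# Greedily decode the predictions for the masked byte positions
predicted_ids = outputs.logits[0, 52:61].argmax(dim=-1).tolist()
print(tokenizer.decode(predicted_ids))
```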
transformers/docs/source/en/model_doc/perceiver.md/0
{ "file_path": "transformers/docs/source/en/model_doc/perceiver.md", "repo_id": "transformers", "token_count": 2762 }
244
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # SigLIP ## Overview The SigLIP model was proposed in [Sigmoid Loss for Language Image Pre-Training](https://arxiv.org/abs/2303.15343) by Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, Lucas Beyer. SigLIP proposes to replace the loss function used in [CLIP](clip) by a simple pairwise sigmoid loss. This results in better performance in terms of zero-shot classification accuracy on ImageNet. The abstract from the paper is the following: *We propose a simple pairwise Sigmoid loss for Language-Image Pre-training (SigLIP). Unlike standard contrastive learning with softmax normalization, the sigmoid loss operates solely on image-text pairs and does not require a global view of the pairwise similarities for normalization. The sigmoid loss simultaneously allows further scaling up the batch size, while also performing better at smaller batch sizes. Combined with Locked-image Tuning, with only four TPUv4 chips, we train a SigLiT model that achieves 84.5% ImageNet zero-shot accuracy in two days. The disentanglement of the batch size from the loss further allows us to study the impact of examples vs pairs and negative to positive ratio. Finally, we push the batch size to the extreme, up to one million, and find that the benefits of growing batch size quickly diminish, with a more reasonable batch size of 32k being sufficient.* ## Usage tips - Usage of SigLIP is similar to [CLIP](clip). The main difference is the training loss, which does not require a global view of all the pairwise similarities of images and texts within a batch. One needs to apply the sigmoid activation function to the logits, rather than the softmax. - Training is not yet supported. If you want to fine-tune SigLIP or train from scratch, refer to the loss function from [OpenCLIP](https://github.com/mlfoundations/open_clip/blob/73ad04ae7fb93ede1c02dc9040a828634cb1edf1/src/open_clip/loss.py#L307), which leverages various `torch.distributed` utilities. - When using the standalone [`SiglipTokenizer`] or [`SiglipProcessor`], make sure to pass `padding="max_length"` as that's how the model was trained. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/siglip_table.jpeg" alt="drawing" width="600"/> <small> SigLIP evaluation results compared to CLIP. Taken from the <a href="https://arxiv.org/abs/2303.15343">original paper</a>.</small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/google-research/big_vision/tree/main). ## Usage example There are 2 main ways to use SigLIP: either using the pipeline API, which abstracts away all the complexity for you, or by using the `SiglipModel` class yourself. 
### Pipeline API The pipeline allows to use the model in a few lines of code: ```python >>> from transformers import pipeline >>> from PIL import Image >>> import requests >>> # load pipe >>> image_classifier = pipeline(task="zero-shot-image-classification", model="google/siglip-base-patch16-224") >>> # load image >>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg' >>> image = Image.open(requests.get(url, stream=True).raw) >>> # inference >>> outputs = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"]) >>> outputs = [{"score": round(output["score"], 4), "label": output["label"] } for output in outputs] >>> print(outputs) [{'score': 0.1979, 'label': '2 cats'}, {'score': 0.0, 'label': 'a remote'}, {'score': 0.0, 'label': 'a plane'}] ``` ### Using the model yourself If you want to do the pre- and postprocessing yourself, here's how to do that: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, AutoModel >>> import torch >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224") >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["a photo of 2 cats", "a photo of 2 dogs"] >>> # important: we pass `padding=max_length` since the model was trained with this >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image >>> probs = torch.sigmoid(logits_per_image) # these are the probabilities >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'") 31.9% that image 0 is 'a photo of 2 cats' ``` ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with SigLIP. - [Zero-shot image classification task guide](../tasks/zero_shot_image_classification_md) - Demo notebooks for SigLIP can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/SigLIP). 🌎 If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## SiglipConfig [[autodoc]] SiglipConfig - from_text_vision_configs ## SiglipTextConfig [[autodoc]] SiglipTextConfig ## SiglipVisionConfig [[autodoc]] SiglipVisionConfig ## SiglipTokenizer [[autodoc]] SiglipTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## SiglipImageProcessor [[autodoc]] SiglipImageProcessor - preprocess ## SiglipProcessor [[autodoc]] SiglipProcessor ## SiglipModel [[autodoc]] SiglipModel - forward - get_text_features - get_image_features ## SiglipTextModel [[autodoc]] SiglipTextModel - forward ## SiglipVisionModel [[autodoc]] SiglipVisionModel - forward ## SiglipForImageClassification [[autodoc]] SiglipForImageClassification - forward
transformers/docs/source/en/model_doc/siglip.md/0
{ "file_path": "transformers/docs/source/en/model_doc/siglip.md", "repo_id": "transformers", "token_count": 1992 }
245
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Table Transformer ## Overview The Table Transformer model was proposed in [PubTables-1M: Towards comprehensive table extraction from unstructured documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham. The authors introduce a new dataset, PubTables-1M, to benchmark progress in table extraction from unstructured documents, as well as table structure recognition and functional analysis. The authors train 2 [DETR](detr) models, one for table detection and one for table structure recognition, dubbed Table Transformers. The abstract from the paper is the following: *Recently, significant progress has been made applying machine learning to the problem of table structure inference and extraction from unstructured documents. However, one of the greatest challenges remains the creation of datasets with complete, unambiguous ground truth at scale. To address this, we develop a new, more comprehensive dataset for table extraction, called PubTables-1M. PubTables-1M contains nearly one million tables from scientific articles, supports multiple input modalities, and contains detailed header and location information for table structures, making it useful for a wide variety of modeling approaches. It also addresses a significant source of ground truth inconsistency observed in prior datasets called oversegmentation, using a novel canonicalization procedure. We demonstrate that these improvements lead to a significant increase in training performance and a more reliable estimate of model performance at evaluation for table structure recognition. Further, we show that transformer-based object detection models trained on PubTables-1M produce excellent results for all three tasks of detection, structure recognition, and functional analysis without the need for any special customization for these tasks.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/table_transformer_architecture.jpeg" alt="drawing" width="600"/> <small> Table detection and table structure recognition clarified. Taken from the <a href="https://arxiv.org/abs/2110.00061">original paper</a>. </small> The authors released 2 models, one for [table detection](https://huggingface.co/microsoft/table-transformer-detection) in documents, one for [table structure recognition](https://huggingface.co/microsoft/table-transformer-structure-recognition) (the task of recognizing the individual rows, columns etc. in a table). This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/table-transformer). 
## Resources <PipelineTag pipeline="object-detection"/> - A demo notebook for the Table Transformer can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Table%20Transformer). - It turns out padding of images is quite important for detection. An interesting Github thread with replies from the authors can be found [here](https://github.com/microsoft/table-transformer/issues/68). ## TableTransformerConfig [[autodoc]] TableTransformerConfig ## TableTransformerModel [[autodoc]] TableTransformerModel - forward ## TableTransformerForObjectDetection [[autodoc]] TableTransformerForObjectDetection - forward
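As a quick reference, here is a minimal table-detection sketch using the detection checkpoint linked above. The input file name is a placeholder; the pre- and post-processing follow the usual DETR-style object-detection flow.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

checkpoint = "microsoft/table-transformer-detection"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = TableTransformerForObjectDetection.from_pretrained(checkpoint)

# "document_page.png" is a placeholder for an image of a document page
image = Image.open("document_page.png").convert("RGB")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw logits and boxes into labelled detections in pixel coordinates
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{model.config.id2label[label.item()]}: {score:.2f} at {box.tolist()}")
```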
transformers/docs/source/en/model_doc/table-transformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/table-transformer.md", "repo_id": "transformers", "token_count": 978 }
246
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # UPerNet ## Overview The UPerNet model was proposed in [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. UPerNet is a general framework to effectively segment a wide range of concepts from images, leveraging any vision backbone like [ConvNeXt](convnext) or [Swin](swin). The abstract from the paper is the following: *Humans recognize the visual world at multiple levels: we effortlessly categorize scenes and detect objects inside, while also identifying the textures and surfaces of the objects along with their different compositional parts. In this paper, we study a new task called Unified Perceptual Parsing, which requires the machine vision systems to recognize as many visual concepts as possible from a given image. A multi-task framework called UPerNet and a training strategy are developed to learn from heterogeneous image annotations. We benchmark our framework on Unified Perceptual Parsing and show that it is able to effectively segment a wide range of concepts from images. The trained networks are further applied to discover visual knowledge in natural scenes.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/upernet_architecture.jpg" alt="drawing" width="600"/> <small> UPerNet framework. Taken from the <a href="https://arxiv.org/abs/1807.10221">original paper</a>. </small> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code is based on OpenMMLab's mmsegmentation [here](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/uper_head.py). ## Usage examples UPerNet is a general framework for semantic segmentation. It can be used with any vision backbone, like so: ```py from transformers import SwinConfig, UperNetConfig, UperNetForSemanticSegmentation backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"]) config = UperNetConfig(backbone_config=backbone_config) model = UperNetForSemanticSegmentation(config) ``` To use another vision backbone, like [ConvNeXt](convnext), simply instantiate the model with the appropriate backbone: ```py from transformers import ConvNextConfig, UperNetConfig, UperNetForSemanticSegmentation backbone_config = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"]) config = UperNetConfig(backbone_config=backbone_config) model = UperNetForSemanticSegmentation(config) ``` Note that this will randomly initialize all the weights of the model. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with UPerNet. 
- Demo notebooks for UPerNet can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UPerNet). - [`UperNetForSemanticSegmentation`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/semantic-segmentation) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb). - See also: [Semantic segmentation task guide](../tasks/semantic_segmentation) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## UperNetConfig [[autodoc]] UperNetConfig ## UperNetForSemanticSegmentation [[autodoc]] UperNetForSemanticSegmentation - forward
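For inference with a pretrained checkpoint, a minimal sketch looks as follows. The `openmmlab/upernet-convnext-tiny` checkpoint name and the use of the processor's `post_process_semantic_segmentation` helper are assumptions; adjust them to the checkpoint you actually load.

```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

# Assumed example checkpoint (ADE20k); replace with your own
checkpoint = "openmmlab/upernet-convnext-tiny"

image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = UperNetForSemanticSegmentation.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Upsample the logits to the input resolution and take the per-pixel argmax
segmentation = image_processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)  # (height, width) map of predicted class indices
```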
transformers/docs/source/en/model_doc/upernet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/upernet.md", "repo_id": "transformers", "token_count": 1188 }
247
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # X-MOD ## Overview The X-MOD model was proposed in [Lifting the Curse of Multilinguality by Pre-training Modular Transformers](http://dx.doi.org/10.18653/v1/2022.naacl-main.255) by Jonas Pfeiffer, Naman Goyal, Xi Lin, Xian Li, James Cross, Sebastian Riedel, and Mikel Artetxe. X-MOD extends multilingual masked language models like [XLM-R](xlm-roberta) to include language-specific modular components (_language adapters_) during pre-training. For fine-tuning, the language adapters in each transformer layer are frozen. The abstract from the paper is the following: *Multilingual pre-trained models are known to suffer from the curse of multilinguality, which causes per-language performance to drop as they cover more languages. We address this issue by introducing language-specific modules, which allows us to grow the total capacity of the model, while keeping the total number of trainable parameters per language constant. In contrast with prior work that learns language-specific components post-hoc, we pre-train the modules of our Cross-lingual Modular (X-MOD) models from the start. Our experiments on natural language inference, named entity recognition and question answering show that our approach not only mitigates the negative interference between languages, but also enables positive transfer, resulting in improved monolingual and cross-lingual performance. Furthermore, our approach enables adding languages post-hoc with no measurable drop in performance, no longer limiting the model usage to the set of pre-trained languages.* This model was contributed by [jvamvas](https://huggingface.co/jvamvas). The original code can be found [here](https://github.com/facebookresearch/fairseq/tree/58cc6cca18f15e6d56e3f60c959fe4f878960a60/fairseq/models/xmod) and the original documentation is found [here](https://github.com/facebookresearch/fairseq/tree/58cc6cca18f15e6d56e3f60c959fe4f878960a60/examples/xmod). ## Usage tips Tips: - X-MOD is similar to [XLM-R](xlm-roberta), but a difference is that the input language needs to be specified so that the correct language adapter can be activated. - The main models – base and large – have adapters for 81 languages. ## Adapter Usage ### Input language There are two ways to specify the input language: 1. By setting a default language before using the model: ```python from transformers import XmodModel model = XmodModel.from_pretrained("facebook/xmod-base") model.set_default_language("en_XX") ``` 2. 
By explicitly passing the index of the language adapter for each sample: ```python import torch input_ids = torch.tensor( [ [0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2], [0, 1310, 49083, 443, 269, 71, 5486, 165, 60429, 660, 23, 2], ] ) lang_ids = torch.LongTensor( [ 0, # en_XX 8, # de_DE ] ) output = model(input_ids, lang_ids=lang_ids) ``` ### Fine-tuning The paper recommends that the embedding layer and the language adapters are frozen during fine-tuning. A method for doing this is provided: ```python model.freeze_embeddings_and_language_adapters() # Fine-tune the model ... ``` ### Cross-lingual transfer After fine-tuning, zero-shot cross-lingual transfer can be tested by activating the language adapter of the target language: ```python model.set_default_language("de_DE") # Evaluate the model on German examples ... ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## XmodConfig [[autodoc]] XmodConfig ## XmodModel [[autodoc]] XmodModel - forward ## XmodForCausalLM [[autodoc]] XmodForCausalLM - forward ## XmodForMaskedLM [[autodoc]] XmodForMaskedLM - forward ## XmodForSequenceClassification [[autodoc]] XmodForSequenceClassification - forward ## XmodForMultipleChoice [[autodoc]] XmodForMultipleChoice - forward ## XmodForTokenClassification [[autodoc]] XmodForTokenClassification - forward ## XmodForQuestionAnswering [[autodoc]] XmodForQuestionAnswering - forward
transformers/docs/source/en/model_doc/xmod.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xmod.md", "repo_id": "transformers", "token_count": 1496 }
248
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Efficient Training on Multiple GPUs If training a model on a single GPU is too slow or if the model's weights do not fit in a single GPU's memory, transitioning to a multi-GPU setup may be a viable option. Prior to making this transition, thoroughly explore all the strategies covered in the [Methods and tools for efficient training on a single GPU](perf_train_gpu_one) as they are universally applicable to model training on any number of GPUs. Once you have employed those strategies and found them insufficient for your case on a single GPU, consider moving to multiple GPUs. Transitioning from a single GPU to multiple GPUs requires the introduction of some form of parallelism, as the workload must be distributed across the resources. Multiple techniques can be employed to achieve parallelism, such as data parallelism, tensor parallelism, and pipeline parallelism. It's important to note that there isn't a one-size-fits-all solution, and the optimal settings depend on the specific hardware configuration you are using. This guide offers an in-depth overview of individual types of parallelism, as well as guidance on ways to combine techniques and choosing an appropriate approach. For step-by-step tutorials on distributed training, please refer to the [🤗 Accelerate documentation](https://huggingface.co/docs/accelerate/index). <Tip> While the main concepts discussed in this guide are likely applicable across frameworks, here we focus on PyTorch-based implementations. </Tip> Before diving deeper into the specifics of each technique, let's go over the rough decision process when training large models on a large infrastructure. ## Scalability strategy Begin by estimating how much vRAM is required to train your model. For models hosted on the 🤗 Hub, use our [Model Memory Calculator](https://huggingface.co/spaces/hf-accelerate/model-memory-usage), which gives you accurate calculations within a few percent margin. **Parallelization strategy for a single Node / multi-GPU setup** When training a model on a single node with multiple GPUs, your choice of parallelization strategy can significantly impact performance. Here's a breakdown of your options: **Case 1: Your model fits onto a single GPU** If your model can comfortably fit onto a single GPU, you have two primary options: 1. DDP - Distributed DataParallel 2. ZeRO - depending on the situation and configuration used, this method may or may not be faster, however, it's worth experimenting with it. **Case 2: Your model doesn't fit onto a single GPU:** If your model is too large for a single GPU, you have several alternatives to consider: 1. PipelineParallel (PP) 2. ZeRO 3. TensorParallel (TP) With very fast inter-node connectivity (e.g., NVLINK or NVSwitch) all three strategies (PP, ZeRO, TP) should result in similar performance. However, without these, PP will be faster than TP or ZeRO. 
The degree of TP may also make a difference. It's best to experiment with your specific setup to determine the most suitable strategy. TP is almost always used within a single node. That is TP size <= GPUs per node. **Case 3: Largest layer of your model does not fit onto a single GPU** 1. If you are not using ZeRO, you have to use TensorParallel (TP), because PipelineParallel (PP) alone won't be sufficient to accommodate the large layer. 2. If you are using ZeRO, additionally adopt techniques from the [Methods and tools for efficient training on a single GPU](perf_train_gpu_one). **Parallelization strategy for a multi-Node / multi-GPU setup** * When you have fast inter-node connectivity (e.g., NVLINK or NVSwitch) consider using one of these options: 1. ZeRO - as it requires close to no modifications to the model 2. A combination of PipelineParallel(PP) with TensorParallel(TP) and DataParallel(DP) - this approach will result in fewer communications, but requires significant changes to the model * When you have slow inter-node connectivity and still low on GPU memory: 1. Employ a combination of DataParallel(DP) with PipelineParallel(PP), TensorParallel(TP), and ZeRO. In the following sections of this guide we dig deeper into how these different parallelism methods work. ## Data Parallelism Even with only 2 GPUs, you can readily leverage the accelerated training capabilities offered by PyTorch's built-in features, such as `DataParallel` (DP) and `DistributedDataParallel` (DDP). Note that [PyTorch documentation](https://pytorch.org/docs/master/generated/torch.nn.DataParallel.html) recommends to prefer `DistributedDataParallel` (DDP) over `DataParallel` (DP) for multi-GPU training as it works for all models. Let's take a look at how these two methods work and what makes them different. ### DataParallel vs DistributedDataParallel To understand the key differences in inter-GPU communication overhead between the two methods, let's review the processes per batch: [DDP](https://pytorch.org/docs/master/notes/ddp.html): - At the start time the main process replicates the model once from GPU 0 to the rest of GPUs - Then for each batch: 1. Each GPU directly consumes its mini-batch of data. 2. During `backward`, once the local gradients are ready, they are averaged across all processes. [DP](https://pytorch.org/docs/master/generated/torch.nn.DataParallel.html): For each batch: 1. GPU 0 reads the batch of data and then sends a mini-batch to each GPU. 2. The up-to-date model is replicated from GPU 0 to each GPU. 3. `forward` is executed, and output from each GPU is sent to GPU 0 to compute the loss. 4. The loss is distributed from GPU 0 to all GPUs, and `backward` is run. 5. Gradients from each GPU are sent to GPU 0 and averaged. Key differences include: 1. DDP performs only a single communication per batch - sending gradients, while DP performs five different data exchanges per batch. DDP copies data using [torch.distributed](https://pytorch.org/docs/master/distributed.html), while DP copies data within the process via Python threads (which introduces limitations associated with GIL). As a result, **`DistributedDataParallel` (DDP) is generally faster than `DataParallel` (DP)** unless you have slow GPU card inter-connectivity. 2. Under DP, GPU 0 performs significantly more work than other GPUs, resulting in GPU under-utilization. 3. DDP supports distributed training across multiple machines, whereas DP does not. 
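To make the DDP pattern concrete, here is a minimal sketch of the per-process setup that `torchrun` drives. The toy model and random data are placeholders, not part of the original guide; a real script would use a `DistributedSampler` over an actual dataset.

```python
import os
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP


def main():
    # torchrun sets LOCAL_RANK and the rendezvous environment variables for each process
    local_rank = int(os.environ["LOCAL_RANK"])
    device = torch.device(f"cuda:{local_rank}")
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(device)

    # Toy model standing in for a real Transformer
    model = torch.nn.Linear(512, 512).to(device)
    model = DDP(model, device_ids=[local_rank])
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

    for step in range(10):
        # Each process consumes its own mini-batch of data
        inputs = torch.randn(8, 512, device=device)
        loss = model(inputs).pow(2).mean()
        loss.backward()  # local gradients are averaged across all processes here
        optimizer.step()
        optimizer.zero_grad()

    dist.destroy_process_group()


if __name__ == "__main__":
    main()  # launch with: torchrun --nproc_per_node=2 ddp_sketch.py
```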
This is not an exhaustive list of differences between DP and DDP, however, other nuances are out of scope of this guide. You can get a deeper understanding of these methods by reading this [article](https://www.telesens.co/2019/04/04/distributed-data-parallel-training-using-pytorch-on-aws/). Let's illustrate the differences between DP and DDP with an experiment. We'll benchmark the differences between DP and DDP with an added context of NVLink presence: * Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`). * Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`. To disable the NVLink feature on one of the benchmarks, we use `NCCL_P2P_DISABLE=1`. Here is the benchmarking code and outputs: **DP** ```bash rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ python examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 110.5948, 'train_samples_per_second': 1.808, 'epoch': 0.69} ``` **DDP w/ NVlink** ```bash rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} ``` **DDP w/o NVlink** ```bash rm -r /tmp/test-clm; NCCL_P2P_DISABLE=1 CUDA_VISIBLE_DEVICES=0,1 \ torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69} ``` Here are the same benchmarking results gathered in a table for convenience: | Type | NVlink | Time | | :----- | ----- | ---: | | 2:DP | Y | 110s | | 2:DDP | Y | 101s | | 2:DDP | N | 131s | As you can see, in this case DP is ~10% slower than DDP with NVlink, but ~15% faster than DDP without NVlink. The real difference will depend on how much data each GPU needs to sync with the others - the more there is to sync, the more a slow link will impede the overall runtime. ## ZeRO Data Parallelism ZeRO-powered data parallelism (ZeRO-DP) is illustrated in the following diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/). <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png" alt="DeepSpeed-Image-1"/> </div> While it may appear complex, it is a very similar concept to `DataParallel` (DP). The difference is that instead of replicating the full model parameters, gradients and optimizer states, each GPU stores only a slice of it. Then, at run-time when the full layer parameters are needed just for the given layer, all GPUs synchronize to give each other parts that they miss. To illustrate this idea, consider a simple model with 3 layers (La, Lb, and Lc), where each layer has 3 parameters. 
Layer La, for example, has weights a0, a1 and a2: ``` La | Lb | Lc ---|----|--- a0 | b0 | c0 a1 | b1 | c1 a2 | b2 | c2 ``` If we have 3 GPUs, ZeRO-DP splits the model onto 3 GPUs like so: ``` GPU0: La | Lb | Lc ---|----|--- a0 | b0 | c0 GPU1: La | Lb | Lc ---|----|--- a1 | b1 | c1 GPU2: La | Lb | Lc ---|----|--- a2 | b2 | c2 ``` In a way, this is the same horizontal slicing as tensor parallelism, as opposed to Vertical slicing, where one puts whole layer-groups on different GPUs. Now let's see how this works: Each of these GPUs will get the usual mini-batch as it works in DP: ``` x0 => GPU0 x1 => GPU1 x2 => GPU2 ``` The inputs are passed without modifications as if they would be processed by the original model. First, the inputs get to the layer `La`. What happens at this point? On GPU0: the x0 mini-batch requires the a0, a1, a2 parameters to do its forward path through the layer, but the GPU0 has only a0. It will get a1 from GPU1 and a2 from GPU2, bringing all the pieces of the model together. In parallel, GPU1 gets another mini-batch - x1. GPU1 has the a1 parameter, but needs a0 and a2, so it gets those from GPU0 and GPU2. Same happens to GPU2 that gets the mini-batch x2. It gets a0 and a1 from GPU0 and GPU1. This way each of the 3 GPUs gets the full tensors reconstructed and makes a forward pass with its own mini-batch. As soon as the calculation is done, the data that is no longer needed gets dropped - it's only used during the calculation. The reconstruction is done efficiently via a pre-fetch. Then the whole process is repeated for layer Lb, then Lc forward-wise, and then backward Lc -> Lb -> La. <Tip> This mechanism is similar to an efficient group backpacking strategy: person A carries the tent, person B carries the stove, and person C carries the axe. Each night they all share what they have with others and get from others what they don't have, and in the morning they pack up their allocated type of gear and continue on their way. This is what ZeRO DP/Sharded DDP is. Compare this strategy to the simple one where each person has to carry their own tent, stove and axe (similar to DataParallel (DP and DDP) in PyTorch), which would be far more inefficient. </Tip> While reading the literature on this topic you may encounter the following synonyms: Sharded, Partitioned. If you pay close attention the way ZeRO partitions the model's weights - it looks very similar to tensor parallelism which will be discussed later. This is because it partitions/shards each layer's weights, unlike vertical model parallelism which is discussed next. Implementations: - [DeepSpeed](https://www.deepspeed.ai/tutorials/zero/) ZeRO-DP stages 1+2+3 - [`Accelerate` integration](https://huggingface.co/docs/accelerate/en/usage_guides/deepspeed) - [`transformers` integration](main_classes/trainer#trainer-integrations) ## From Naive Model Parallelism to Pipeline Parallelism To explain Pipeline parallelism, we'll first look into Naive Model Parallelism (MP), also known as Vertical MP. This approach involves distributing groups of model layers across multiple GPUs by assigning specific layers to specific GPUs with `.to()`. As data flows through these layers, it is moved to the same GPU as the layer, while the other layers remain untouched. We refer to this Model parallelism as "Vertical" because of how models are typically visualized. 
For example, the following diagram shows an 8-layer model split vertically into two slices, placing layers 0-3 onto GPU0 and 4-7 to GPU1:

```
================
| Layer |      |
|   0   |      |
|   1   | GPU0 |
|   2   |      |
|   3   |      |
================
| Layer |      |
|   4   |      |
|   5   | GPU1 |
|   6   |      |
|   7   |      |
================
```

In this example, when data moves from layer 0 to 3, it's no different from a regular forward pass. However, passing data from layer 3 to 4 requires moving it from GPU0 to GPU1, introducing a communication overhead. If the participating GPUs are on the same compute node (e.g. same physical machine) this copying is fast, but if the GPUs are distributed across different compute nodes (e.g. multiple machines), the communication overhead could be substantially greater. Following that, layers 4 to 7 work as they would in the original model. Upon completion of the 7th layer, there is often a need to send the data back to layer 0 where the labels are (or alternatively send the labels to the last layer). Now the loss can be computed and the optimizer can do its work. Naive Model Parallelism comes with several shortcomings: - **All but one GPU are idle at any given moment**: if 4 GPUs are used, it's nearly identical to quadrupling the amount of memory of a single GPU, and ignoring the rest of the hardware. - **Overhead in data transfer between devices**: E.g. 4x 6GB cards will be able to accommodate the same size as 1x 24GB card using naive MP, but a single 24GB card will complete the training faster, because it doesn't have the data copying overhead. But, say, if you have 40GB cards and need to fit a 45GB model you can with 4x 40GB cards (but barely because of the gradient and optimizer states). - **Copying shared embeddings**: Shared embeddings may need to get copied back and forth between GPUs. Now that you are familiar with how the naive approach to model parallelism works and its shortcomings, let's look at Pipeline Parallelism (PP). PP is almost identical to naive MP, but it solves the GPU idling problem by chunking the incoming batch into micro-batches and artificially creating a pipeline, which allows different GPUs to concurrently participate in the computation process. The following illustration from the [GPipe paper](https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html) shows the naive MP on the top, and PP on the bottom: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-gpipe-bubble.png" alt="MP vs PP"/> </div> At the bottom of the diagram, you can observe that the Pipeline Parallelism (PP) approach minimizes the number of idle GPU zones, referred to as 'bubbles'. Both parts of the diagram show a parallelism level of degree 4, meaning that 4 GPUs are involved in the pipeline. You can see that there's a forward path of 4 pipe stages (F0, F1, F2 and F3) followed by a backward path in reverse order (B3, B2, B1, and B0). PP introduces a new hyperparameter to tune - `chunks`, which determines how many data chunks are sent in a sequence through the same pipe stage. For example, in the bottom diagram you can see `chunks=4`. GPU0 performs the same forward path on chunks 0, 1, 2 and 3 (F0,0, F0,1, F0,2, F0,3) and then it waits for the other GPUs to complete their work. Only when the other GPUs begin to complete their work does GPU0 start to work again, doing the backward path for chunks 3, 2, 1 and 0 (B0,3, B0,2, B0,1, B0,0). Note that this is the same concept as gradient accumulation steps.
PyTorch uses `chunks`, while DeepSpeed refers to the same hyperparameter as gradient accumulation steps. Because of the chunks, PP introduces the notion of micro-batches (MBS). DP splits the global data batch size into mini-batches, so if you have a DP degree of 4, a global batch size of 1024 gets split up into 4 mini-batches of 256 each (1024/4). And if the number of `chunks` (or GAS) is 32 we end up with a micro-batch size of 8 (256/32). Each Pipeline stage works with a single micro-batch at a time. To calculate the global batch size of the DP + PP setup, use the formula: `mbs * chunks * dp_degree` (`8 * 32 * 4 = 1024`). With `chunks=1` you end up with the naive MP, which is inefficient. With a large `chunks` value you end up with tiny micro-batch sizes which is also inefficient. For this reason, we encourage to experiment with the `chunks` value to find the one that leads to the most efficient GPUs utilization. You may notice a bubble of "dead" time on the diagram that can't be parallelized because the last `forward` stage has to wait for `backward` to complete the pipeline. The purpose of finding the best value for `chunks` is to enable a high concurrent GPU utilization across all participating GPUs which translates to minimizing the size of the bubble. Pipeline API solutions have been implemented in: - PyTorch - DeepSpeed - Megatron-LM These come with some shortcomings: - They have to modify the model quite heavily, because Pipeline requires one to rewrite the normal flow of modules into a `nn.Sequential` sequence of the same, which may require changes to the design of the model. - Currently the Pipeline API is very restricted. If you had a bunch of Python variables being passed in the very first stage of the Pipeline, you will have to find a way around it. Currently, the pipeline interface requires either a single Tensor or a tuple of Tensors as the only input and output. These tensors must have a batch size as the very first dimension, since pipeline is going to chunk the mini batch into micro-batches. Possible improvements are being discussed here https://github.com/pytorch/pytorch/pull/50693 - Conditional control flow at the level of pipe stages is not possible - e.g., Encoder-Decoder models like T5 require special workarounds to handle a conditional encoder stage. - They have to arrange each layer so that the output of one layer becomes an input to the other layer. More recent solutions include: - Varuna - Sagemaker We have not experimented with Varuna and SageMaker but their papers report that they have overcome the list of problems mentioned above and that they require smaller changes to the user's model. Implementations: - [PyTorch](https://pytorch.org/docs/stable/pipeline.html) (initial support in pytorch-1.8, and progressively getting improved in 1.9 and more so in 1.10). Some [examples](https://github.com/pytorch/pytorch/blob/master/benchmarks/distributed/pipeline/pipe.py) - [DeepSpeed](https://www.deepspeed.ai/tutorials/pipeline/) - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) has an internal implementation - no API. - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://arxiv.org/abs/2111.05972) - this is a proprietary solution that can only be used on AWS. - [OSLO](https://github.com/tunib-ai/oslo) - this is implemented based on the Hugging Face Transformers. 🤗 Transformers status: as of this writing none of the models supports full-PP. GPT2 and T5 models have naive MP support. 
The main obstacle is being unable to convert the models to `nn.Sequential` and have all the inputs to be Tensors. This is because currently the models include many features that make the conversion very complicated, and will need to be removed to accomplish that. DeepSpeed and Megatron-LM integrations are available in [🤗 Accelerate](https://huggingface.co/docs/accelerate/main/en/usage_guides/deepspeed) Other approaches: DeepSpeed, Varuna and SageMaker use the concept of an [Interleaved Pipeline](https://docs.aws.amazon.com/sagemaker/latest/dg/model-parallel-core-features.html) <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-sagemaker-interleaved-pipeline.png" alt="Interleaved pipeline execution"/> </div> Here the bubble (idle time) is further minimized by prioritizing backward passes. Varuna further attempts to improve the schedule by using simulations to discover the most efficient scheduling. OSLO has pipeline parallelism implementation based on the Transformers without `nn.Sequential` conversion. ## Tensor Parallelism In Tensor Parallelism, each GPU processes a slice of a tensor and only aggregates the full tensor for operations requiring it. To describe this method, this section of the guide relies on the concepts and diagrams from the [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) paper: [Efficient Large-Scale Language Model Training on GPU Clusters](https://arxiv.org/abs/2104.04473). The main building block of any transformer is a fully connected `nn.Linear` followed by a nonlinear activation `GeLU`. The dot dot-product part of it, following the Megatron's paper notation, can be written as `Y = GeLU(XA)`, where `X` is an input vector, `Y` is the output vector, and `A` is the weight matrix. If we look at the computation in matrix form, you can see how the matrix multiplication can be split between multiple GPUs: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_gemm.png" alt="Parallel GEMM"/> </div> If we split the weight matrix `A` column-wise across `N` GPUs and perform matrix multiplications `XA_1` through `XA_n` in parallel, then we will end up with `N` output vectors `Y_1, Y_2, ..., Y_n` which can be fed into `GeLU` independently: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-independent-gelu.png" alt="Independent GeLU"/> </div> Using this principle, we can update a multi-layer perceptron of arbitrary depth, without the need for any synchronization between GPUs until the very end, where we need to reconstruct the output vector from shards. The Megatron-LM paper authors provide a helpful illustration for that: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_shard_processing.png" alt="Parallel shard processing"/> </div> Parallelizing the multi-headed attention layers is even simpler, since they are already inherently parallel, due to having multiple independent heads! <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_self_attention.png" alt="Parallel self-attention"/> </div> Special considerations: TP requires very fast network, and therefore it's not advisable to do TP across more than one node. 
Practically, if a node has 4 GPUs, the highest TP degree is therefore 4. If you need a TP degree of 8, you need to use nodes that have at least 8 GPUs. This section is based on the original much more [detailed TP overview](https://github.com/huggingface/transformers/issues/10321#issuecomment-783543530). by [@anton-l](https://github.com/anton-l). Alternative names: - DeepSpeed calls it [tensor slicing](https://www.deepspeed.ai/training/#model-parallelism) Implementations: - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) has an internal implementation, as it's very model-specific - [parallelformers](https://github.com/tunib-ai/parallelformers) (only inference at the moment) - [SageMaker](https://arxiv.org/abs/2111.05972) - this is a proprietary solution that can only be used on AWS. - [OSLO](https://github.com/tunib-ai/oslo) has the tensor parallelism implementation based on the Transformers. SageMaker combines TP with DP for a more efficient processing. 🤗 Transformers status: - core: not yet implemented in the core - but if you want inference [parallelformers](https://github.com/tunib-ai/parallelformers) provides this support for most of our models. So until this is implemented in the core you can use theirs. And hopefully training mode will be supported too. - Deepspeed-Inference also supports our BERT, GPT-2, and GPT-Neo models in their super-fast CUDA-kernel-based inference mode, see more [here](https://www.deepspeed.ai/tutorials/inference-tutorial/) 🤗 Accelerate integrates with [TP from Megatron-LM](https://huggingface.co/docs/accelerate/v0.23.0/en/usage_guides/megatron_lm). ## Data Parallelism + Pipeline Parallelism The following diagram from the DeepSpeed [pipeline tutorial](https://www.deepspeed.ai/tutorials/pipeline/) demonstrates how one can combine DP with PP. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero-dp-pp.png" alt="DP + PP-2d"/> </div> Here it's important to see how DP rank 0 doesn't see GPU2 and DP rank 1 doesn't see GPU3. To DP there is just GPUs 0 and 1 where it feeds data as if there were just 2 GPUs. GPU0 "secretly" offloads some of its load to GPU2 using PP. And GPU1 does the same by enlisting GPU3 to its aid. Since each dimension requires at least 2 GPUs, here you'd need at least 4 GPUs. Implementations: - [DeepSpeed](https://github.com/microsoft/DeepSpeed) - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://arxiv.org/abs/2111.05972) - [OSLO](https://github.com/tunib-ai/oslo) 🤗 Transformers status: not yet implemented ## Data Parallelism + Pipeline Parallelism + Tensor Parallelism To get an even more efficient training a 3D parallelism is used where PP is combined with TP and DP. This can be seen in the following diagram. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-deepspeed-3d.png" alt="dp-pp-tp-3d"/> </div> This diagram is from a blog post [3D parallelism: Scaling to trillion-parameter models](https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/), which is a good read as well. Since each dimension requires at least 2 GPUs, here you'd need at least 8 GPUs. Implementations: - [DeepSpeed](https://github.com/microsoft/DeepSpeed) - DeepSpeed also includes an even more efficient DP, which they call ZeRO-DP. 
- [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://arxiv.org/abs/2111.05972) - [OSLO](https://github.com/tunib-ai/oslo) 🤗 Transformers status: not yet implemented, since we have no PP and TP. ## ZeRO Data Parallelism + Pipeline Parallelism + Tensor Parallelism One of the main features of DeepSpeed is ZeRO, which is a super-scalable extension of DP. It has already been discussed in [ZeRO Data Parallelism](#zero-data-parallelism). Normally it's a standalone feature that doesn't require PP or TP. But it can be combined with PP and TP. When ZeRO-DP is combined with PP (and optionally TP) it typically enables only ZeRO stage 1 (optimizer sharding). While it's theoretically possible to use ZeRO stage 2 (gradient sharding) with Pipeline Parallelism, it will have negative performance impacts. There would need to be an additional reduce-scatter collective for every micro-batch to aggregate the gradients before sharding, which adds a potentially significant communication overhead. By nature of Pipeline Parallelism, small micro-batches are used and instead the focus is on trying to balance arithmetic intensity (micro-batch size) with minimizing the Pipeline bubble (number of micro-batches). Therefore those communication costs are going to impact the performance. In addition, there are already fewer layers than normal due to PP and so the memory savings won't be huge. PP already reduces gradient size by ``1/PP``, and so gradient sharding savings on top of that are less significant than pure DP. ZeRO stage 3 is not a good choice either for the same reason - more inter-node communications required. And since we have ZeRO, the other benefit is ZeRO-Offload. Since this is stage 1 optimizer states can be offloaded to CPU. Implementations: - [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed) and [Megatron-Deepspeed from BigScience](https://github.com/bigscience-workshop/Megatron-DeepSpeed), which is the fork of the former repo. - [OSLO](https://github.com/tunib-ai/oslo) Important papers: - [Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model]( https://arxiv.org/abs/2201.11990) 🤗 Transformers status: not yet implemented, since we have no PP and TP. ## FlexFlow [FlexFlow](https://github.com/flexflow/FlexFlow) also solves the parallelization problem in a slightly different approach. Paper: ["Beyond Data and Model Parallelism for Deep Neural Networks" by Zhihao Jia, Matei Zaharia, Alex Aiken](https://arxiv.org/abs/1807.05358) It performs a sort of 4D Parallelism over Sample-Operator-Attribute-Parameter. 1. Sample = Data Parallelism (sample-wise parallel) 2. Operator = Parallelize a single operation into several sub-operations 3. Attribute = Data Parallelism (length-wise parallel) 4. Parameter = Model Parallelism (regardless of dimension - horizontal or vertical) Examples: * Sample Let's take 10 batches of sequence length 512. If we parallelize them by sample dimension into 2 devices, we get 10 x 512 which becomes be 5 x 2 x 512. * Operator If we perform layer normalization, we compute std first and mean second, and then we can normalize data. Operator parallelism allows computing std and mean in parallel. So if we parallelize them by operator dimension into 2 devices (cuda:0, cuda:1), first we copy input data into both devices, and cuda:0 computes std, cuda:1 computes mean at the same time. * Attribute We have 10 batches of 512 length. 
If we parallelize them by attribute dimension into 2 devices, 10 x 512 will be 10 x 2 x 256. * Parameter It is similar with tensor model parallelism or naive layer-wise model parallelism. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-flexflow.jpeg" alt="flex-flow-soap"/> </div> The significance of this framework is that it takes resources like (1) GPU/TPU/CPU vs. (2) RAM/DRAM vs. (3) fast-intra-connect/slow-inter-connect and it automatically optimizes all these algorithmically deciding which parallelisation to use where. One very important aspect is that FlexFlow is designed for optimizing DNN parallelizations for models with static and fixed workloads, since models with dynamic behavior may prefer different parallelization strategies across iterations. So the promise is very attractive - it runs a 30min simulation on the cluster of choice and it comes up with the best strategy to utilise this specific environment. If you add/remove/replace any parts it'll run and re-optimize the plan for that. And then you can train. A different setup will have its own custom optimization. 🤗 Transformers status: Transformers models are FX-trace-able via [transformers.utils.fx](https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py), which is a prerequisite for FlexFlow, however, changes are required on the FlexFlow side to make it work with Transformers models. ## GPU selection When training on multiple GPUs, you can specify the number of GPUs to use and in what order. This can be useful for instance when you have GPUs with different computing power and want to use the faster GPU first. The selection process works for both [DistributedDataParallel](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) and [DataParallel](https://pytorch.org/docs/stable/generated/torch.nn.DataParallel.html) to use only a subset of the available GPUs, and you don't need Accelerate or the [DeepSpeed integration](./main_classes/deepspeed). ### Number of GPUs For example, if you have 4 GPUs and you only want to use the first 2: <hfoptions id="select-gpu"> <hfoption id="torchrun"> Use the `--nproc_per_node` to select how many GPUs to use. ```bash torchrun --nproc_per_node=2 trainer-program.py ... ``` </hfoption> <hfoption id="Accelerate"> Use `--num_processes` to select how many GPUs to use. ```bash accelerate launch --num_processes 2 trainer-program.py ... ``` </hfoption> <hfoption id="DeepSpeed"> Use `--num_gpus` to select how many GPUs to use. ```bash deepspeed --num_gpus 2 trainer-program.py ... ``` </hfoption> </hfoptions> ### Order of GPUs Now, to select which GPUs to use and their order, you'll use the `CUDA_VISIBLE_DEVICES` environment variable. It is easiest to set the environment variable in a `~/bashrc` or another startup config file. `CUDA_VISIBLE_DEVICES` is used to map which GPUs are used. For example, if you have 4 GPUs (0, 1, 2, 3) and you only want to run GPUs 0 and 2: ```bash CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ... ``` Only the 2 physical GPUs (0 and 2) are "visible" to PyTorch and these are mapped to `cuda:0` and `cuda:1` respectively. You can also reverse the order of the GPUs to use 2 first. Now, the mapping is `cuda:1` for GPU 0 and `cuda:0` for GPU 2. ```bash CUDA_VISIBLE_DEVICES=2,0 torchrun trainer-program.py ... ``` You can also set the `CUDA_VISIBLE_DEVICES` environment variable to an empty value to create an environment without GPUs. 
```bash
CUDA_VISIBLE_DEVICES= python trainer-program.py ...
```

<Tip warning={true}>

As with any environment variable, it can be exported instead of being added to the command line. However, this is not recommended because it can be confusing if you forget how the environment variable was set up and you end up using the wrong GPUs. Instead, it is common practice to set the environment variable for a specific training run on the same command line.

</Tip>

`CUDA_DEVICE_ORDER` is an alternative environment variable you can use to control how the GPUs are ordered. You can order them by either:

1. PCIe bus IDs, which match the order of [`nvidia-smi`](https://developer.nvidia.com/nvidia-system-management-interface) and [`rocm-smi`](https://rocm.docs.amd.com/projects/rocm_smi_lib/en/latest/.doxygen/docBin/html/index.html) for NVIDIA and AMD GPUs respectively

```bash
export CUDA_DEVICE_ORDER=PCI_BUS_ID
```

2. GPU compute ability

```bash
export CUDA_DEVICE_ORDER=FASTEST_FIRST
```

`CUDA_DEVICE_ORDER` is especially useful if your training setup consists of an older and a newer GPU, where the older GPU appears first, but you cannot physically swap the cards to make the newer GPU appear first. In this case, set `CUDA_DEVICE_ORDER=FASTEST_FIRST` to always use the newer and faster GPU first (`nvidia-smi` or `rocm-smi` still reports the GPUs in their PCIe order). Alternatively, you could set `export CUDA_VISIBLE_DEVICES=1,0`.
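If you want to confirm which physical cards a run actually sees after setting these variables, a quick check from Python makes the mapping explicit. This is just a small sanity-check sketch using standard PyTorch calls:

```python
# Print the logical-to-physical GPU mapping PyTorch ends up with after
# CUDA_VISIBLE_DEVICES and/or CUDA_DEVICE_ORDER have been set for the run.
import torch

print(f"visible device count: {torch.cuda.device_count()}")
for i in range(torch.cuda.device_count()):
    # cuda:i is the logical index used in your code; the name identifies
    # the physical card it was mapped to
    print(f"cuda:{i} -> {torch.cuda.get_device_name(i)}")
```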
transformers/docs/source/en/perf_train_gpu_many.md/0
{ "file_path": "transformers/docs/source/en/perf_train_gpu_many.md", "repo_id": "transformers", "token_count": 10524 }
249
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # What 🤗 Transformers can do 🤗 Transformers is a library of pretrained state-of-the-art models for natural language processing (NLP), computer vision, and audio and speech processing tasks. Not only does the library contain Transformer models, but it also has non-Transformer models like modern convolutional networks for computer vision tasks. If you look at some of the most popular consumer products today, like smartphones, apps, and televisions, odds are that some kind of deep learning technology is behind it. Want to remove a background object from a picture taken by your smartphone? This is an example of a panoptic segmentation task (don't worry if you don't know what this means yet, we'll describe it in the following sections!). This page provides an overview of the different speech and audio, computer vision, and NLP tasks that can be solved with the 🤗 Transformers library in just three lines of code! ## Audio Audio and speech processing tasks are a little different from the other modalities mainly because audio as an input is a continuous signal. Unlike text, a raw audio waveform can't be neatly split into discrete chunks the way a sentence can be divided into words. To get around this, the raw audio signal is typically sampled at regular intervals. If you take more samples within an interval, the sampling rate is higher, and the audio more closely resembles the original audio source. Previous approaches preprocessed the audio to extract useful features from it. It is now more common to start audio and speech processing tasks by directly feeding the raw audio waveform to a feature encoder to extract an audio representation. This simplifies the preprocessing step and allows the model to learn the most essential features. ### Audio classification Audio classification is a task that labels audio data from a predefined set of classes. 
It is a broad category with many specific applications, some of which include: * acoustic scene classification: label audio with a scene label ("office", "beach", "stadium") * acoustic event detection: label audio with a sound event label ("car horn", "whale calling", "glass breaking") * tagging: label audio containing multiple sounds (birdsongs, speaker identification in a meeting) * music classification: label music with a genre label ("metal", "hip-hop", "country") ```py >>> from transformers import pipeline >>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er") >>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4532, 'label': 'hap'}, {'score': 0.3622, 'label': 'sad'}, {'score': 0.0943, 'label': 'neu'}, {'score': 0.0903, 'label': 'ang'}] ``` ### Automatic speech recognition Automatic speech recognition (ASR) transcribes speech into text. It is one of the most common audio tasks due partly to speech being such a natural form of human communication. Today, ASR systems are embedded in "smart" technology products like speakers, phones, and cars. We can ask our virtual assistants to play music, set reminders, and tell us the weather. But one of the key challenges Transformer architectures have helped with is in low-resource languages. By pretraining on large amounts of speech data, finetuning the model on only one hour of labeled speech data in a low-resource language can still produce high-quality results compared to previous ASR systems trained on 100x more labeled data. ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` ## Computer vision One of the first and earliest successful computer vision tasks was recognizing images of zip code numbers using a [convolutional neural network (CNN)](glossary#convolution). An image is composed of pixels, and each pixel has a numerical value. This makes it easy to represent an image as a matrix of pixel values. Each particular combination of pixel values describes the colors of an image. Two general ways computer vision tasks can be solved are: 1. Use convolutions to learn the hierarchical features of an image from low-level features to high-level abstract things. 2. Split an image into patches and use a Transformer to gradually learn how each image patch is related to each other to form an image. Unlike the bottom-up approach favored by a CNN, this is kind of like starting out with a blurry image and then gradually bringing it into focus. ### Image classification Image classification labels an entire image from a predefined set of classes. 
Like most classification tasks, there are many practical use cases for image classification, some of which include: * healthcare: label medical images to detect disease or monitor patient health * environment: label satellite images to monitor deforestation, inform wildland management or detect wildfires * agriculture: label images of crops to monitor plant health or satellite images for land use monitoring * ecology: label images of animal or plant species to monitor wildlife populations or track endangered species ```py >>> from transformers import pipeline >>> classifier = pipeline(task="image-classification") >>> preds = classifier( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.4335, 'label': 'lynx, catamount'} {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'} {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'} {'score': 0.0239, 'label': 'Egyptian cat'} {'score': 0.0229, 'label': 'tiger cat'} ``` ### Object detection Unlike image classification, object detection identifies multiple objects within an image and the objects' positions in an image (defined by the bounding box). Some example applications of object detection include: * self-driving vehicles: detect everyday traffic objects such as other vehicles, pedestrians, and traffic lights * remote sensing: disaster monitoring, urban planning, and weather forecasting * defect detection: detect cracks or structural damage in buildings, and manufacturing defects ```py >>> from transformers import pipeline >>> detector = pipeline(task="object-detection") >>> preds = detector( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds] >>> preds [{'score': 0.9865, 'label': 'cat', 'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}] ``` ### Image segmentation Image segmentation is a pixel-level task that assigns every pixel in an image to a class. It differs from object detection, which uses bounding boxes to label and predict objects in an image because segmentation is more granular. Segmentation can detect objects at a pixel-level. There are several types of image segmentation: * instance segmentation: in addition to labeling the class of an object, it also labels each distinct instance of an object ("dog-1", "dog-2") * panoptic segmentation: a combination of semantic and instance segmentation; it labels each pixel with a semantic class **and** each distinct instance of an object Segmentation tasks are helpful in self-driving vehicles to create a pixel-level map of the world around them so they can navigate safely around pedestrians and other vehicles. It is also useful for medical imaging, where the task's finer granularity can help identify abnormal cells or organ features. Image segmentation can also be used in ecommerce to virtually try on clothes or create augmented reality experiences by overlaying objects in the real world through your camera. ```py >>> from transformers import pipeline >>> segmenter = pipeline(task="image-segmentation") >>> preds = segmenter( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... 
) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> print(*preds, sep="\n") {'score': 0.9879, 'label': 'LABEL_184'} {'score': 0.9973, 'label': 'snow'} {'score': 0.9972, 'label': 'cat'} ``` ### Depth estimation Depth estimation predicts the distance of each pixel in an image from the camera. This computer vision task is especially important for scene understanding and reconstruction. For example, in self-driving cars, vehicles need to understand how far objects like pedestrians, traffic signs, and other vehicles are to avoid obstacles and collisions. Depth information is also helpful for constructing 3D representations from 2D images and can be used to create high-quality 3D representations of biological structures or buildings. There are two approaches to depth estimation: * stereo: depths are estimated by comparing two images of the same image from slightly different angles * monocular: depths are estimated from a single image ```py >>> from transformers import pipeline >>> depth_estimator = pipeline(task="depth-estimation") >>> preds = depth_estimator( ... "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) ``` ## Natural language processing NLP tasks are among the most common types of tasks because text is such a natural way for us to communicate. To get text into a format recognized by a model, it needs to be tokenized. This means dividing a sequence of text into separate words or subwords (tokens) and then converting these tokens into numbers. As a result, you can represent a sequence of text as a sequence of numbers, and once you have a sequence of numbers, it can be input into a model to solve all sorts of NLP tasks! ### Text classification Like classification tasks in any modality, text classification labels a sequence of text (it can be sentence-level, a paragraph, or a document) from a predefined set of classes. There are many practical applications for text classification, some of which include: * sentiment analysis: label text according to some polarity like `positive` or `negative` which can inform and support decision-making in fields like politics, finance, and marketing * content classification: label text according to some topic to help organize and filter information in news and social media feeds (`weather`, `sports`, `finance`, etc.) ```py >>> from transformers import pipeline >>> classifier = pipeline(task="sentiment-analysis") >>> preds = classifier("Hugging Face is the best thing since sliced bread!") >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.9991, 'label': 'POSITIVE'}] ``` ### Token classification In any NLP task, text is preprocessed by separating the sequence of text into individual words or subwords. These are known as [tokens](glossary#token). Token classification assigns each token a label from a predefined set of classes. Two common types of token classification are: * named entity recognition (NER): label a token according to an entity category like organization, person, location or date. NER is especially popular in biomedical settings, where it can label genes, proteins, and drug names. * part-of-speech tagging (POS): label a token according to its part-of-speech like noun, verb, or adjective. POS is useful for helping translation systems understand how two identical words are grammatically different (bank as a noun versus bank as a verb). 
```py >>> from transformers import pipeline >>> classifier = pipeline(task="ner") >>> preds = classifier("Hugging Face is a French company based in New York City.") >>> preds = [ ... { ... "entity": pred["entity"], ... "score": round(pred["score"], 4), ... "index": pred["index"], ... "word": pred["word"], ... "start": pred["start"], ... "end": pred["end"], ... } ... for pred in preds ... ] >>> print(*preds, sep="\n") {'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2} {'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7} {'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12} {'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24} {'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45} {'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50} {'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55} ``` ### Question answering Question answering is another token-level task that returns an answer to a question, sometimes with context (open-domain) and other times without context (closed-domain). This task happens whenever we ask a virtual assistant something like whether a restaurant is open. It can also provide customer or technical support and help search engines retrieve the relevant information you're asking for. There are two common types of question answering: * extractive: given a question and some context, the answer is a span of text from the context the model must extract * abstractive: given a question and some context, the answer is generated from the context; this approach is handled by the [`Text2TextGenerationPipeline`] instead of the [`QuestionAnsweringPipeline`] shown below ```py >>> from transformers import pipeline >>> question_answerer = pipeline(task="question-answering") >>> preds = question_answerer( ... question="What is the name of the repository?", ... context="The name of the repository is huggingface/transformers", ... ) >>> print( ... f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}" ... ) score: 0.9327, start: 30, end: 54, answer: huggingface/transformers ``` ### Summarization Summarization creates a shorter version of a text from a longer one while trying to preserve most of the meaning of the original document. Summarization is a sequence-to-sequence task; it outputs a shorter text sequence than the input. There are a lot of long-form documents that can be summarized to help readers quickly understand the main points. Legislative bills, legal and financial documents, patents, and scientific papers are a few examples of documents that could be summarized to save readers time and serve as a reading aid. Like question answering, there are two types of summarization: * extractive: identify and extract the most important sentences from the original text * abstractive: generate the target summary (which may include new words not in the input document) from the original text; the [`SummarizationPipeline`] uses the abstractive approach ```py >>> from transformers import pipeline >>> summarizer = pipeline(task="summarization") >>> summarizer( ... "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. 
For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles." ... ) [{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}] ``` ### Translation Translation converts a sequence of text in one language to another. It is important in helping people from different backgrounds communicate with each other, help translate content to reach wider audiences, and even be a learning tool to help people learn a new language. Along with summarization, translation is a sequence-to-sequence task, meaning the model receives an input sequence and returns a target output sequence. In the early days, translation models were mostly monolingual, but recently, there has been increasing interest in multilingual models that can translate between many pairs of languages. ```py >>> from transformers import pipeline >>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning." >>> translator = pipeline(task="translation", model="google-t5/t5-small") >>> translator(text) [{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}] ``` ### Language modeling Language modeling is a task that predicts a word in a sequence of text. It has become a very popular NLP task because a pretrained language model can be finetuned for many other downstream tasks. Lately, there has been a lot of interest in large language models (LLMs) which demonstrate zero- or few-shot learning. This means the model can solve tasks it wasn't explicitly trained to do! Language models can be used to generate fluent and convincing text, though you need to be careful since the text may not always be accurate. There are two types of language modeling: * causal: the model's objective is to predict the next token in a sequence, and future tokens are masked ```py >>> from transformers import pipeline >>> prompt = "Hugging Face is a community-based open-source platform for machine learning." >>> generator = pipeline(task="text-generation") >>> generator(prompt) # doctest: +SKIP ``` * masked: the model's objective is to predict a masked token in a sequence with full access to the tokens in the sequence ```py >>> text = "Hugging Face is a community-based open-source <mask> for machine learning." >>> fill_mask = pipeline(task="fill-mask") >>> preds = fill_mask(text, top_k=1) >>> preds = [ ... { ... "score": round(pred["score"], 4), ... "token": pred["token"], ... "token_str": pred["token_str"], ... "sequence": pred["sequence"], ... } ... for pred in preds ... ] >>> preds [{'score': 0.2236, 'token': 1761, 'token_str': ' platform', 'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}] ``` ## Multimodal Multimodal tasks require a model to process multiple data modalities (text, image, audio, video) to solve a particular problem. 
Image captioning is an example of a multimodal task where the model takes an image as input and outputs a sequence of text describing the image or some properties of the image. Although multimodal models work with different data types or modalities, internally, the preprocessing steps help the model convert all the data types into embeddings (vectors or list of numbers that holds meaningful information about the data). For a task like image captioning, the model learns relationships between image embeddings and text embeddings. ### Document question answering Document question answering is a task that answers natural language questions from a document. Unlike a token-level question answering task which takes text as input, document question answering takes an image of a document as input along with a question about the document and returns an answer. Document question answering can be used to parse structured documents and extract key information from it. In the example below, the total amount and change due can be extracted from a receipt. ```py >>> from transformers import pipeline >>> from PIL import Image >>> import requests >>> url = "https://datasets-server.huggingface.co/assets/hf-internal-testing/example-documents/--/hf-internal-testing--example-documents/test/2/image/image.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> doc_question_answerer = pipeline("document-question-answering", model="magorshunov/layoutlm-invoices") >>> preds = doc_question_answerer( ... question="What is the total amount?", ... image=image, ... ) >>> preds [{'score': 0.8531, 'answer': '17,000', 'start': 4, 'end': 4}] ``` Hopefully, this page has given you some more background information about all the types of tasks in each modality and the practical importance of each one. In the next [section](tasks_explained), you'll learn **how** 🤗 Transformers work to solve these tasks.
transformers/docs/source/en/task_summary.md/0
{ "file_path": "transformers/docs/source/en/task_summary.md", "repo_id": "transformers", "token_count": 5687 }
250
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLM prompting guide [[open-in-colab]] Large Language Models such as Falcon, LLaMA, etc. are pretrained transformer models initially trained to predict the next token given some input text. They typically have billions of parameters and have been trained on trillions of tokens for an extended period of time. As a result, these models become quite powerful and versatile, and you can use them to solve multiple NLP tasks out of the box by instructing the models with natural language prompts. Designing such prompts to ensure the optimal output is often called "prompt engineering". Prompt engineering is an iterative process that requires a fair amount of experimentation. Natural languages are much more flexible and expressive than programming languages, however, they can also introduce some ambiguity. At the same time, prompts in natural language are quite sensitive to changes. Even minor modifications in prompts can lead to wildly different outputs. While there is no exact recipe for creating prompts to match all cases, researchers have worked out a number of best practices that help to achieve optimal results more consistently. This guide covers the prompt engineering best practices to help you craft better LLM prompts and solve various NLP tasks. You'll learn: - [Basics of prompting](#basics-of-prompting) - [Best practices of LLM prompting](#best-practices-of-llm-prompting) - [Advanced prompting techniques: few-shot prompting and chain-of-thought](#advanced-prompting-techniques) - [When to fine-tune instead of prompting](#prompting-vs-fine-tuning) <Tip> Prompt engineering is only a part of the LLM output optimization process. Another essential component is choosing the optimal text generation strategy. You can customize how your LLM selects each of the subsequent tokens when generating the text without modifying any of the trainable parameters. By tweaking the text generation parameters, you can reduce repetition in the generated text and make it more coherent and human-sounding. Text generation strategies and parameters are out of scope for this guide, but you can learn more about these topics in the following guides: * [Generation with LLMs](../llm_tutorial) * [Text generation strategies](../generation_strategies) </Tip> ## Basics of prompting ### Types of models The majority of modern LLMs are decoder-only transformers. Some examples include: [LLaMA](../model_doc/llama), [Llama2](../model_doc/llama2), [Falcon](../model_doc/falcon), [GPT2](../model_doc/gpt2). However, you may encounter encoder-decoder transformer LLMs as well, for instance, [Flan-T5](../model_doc/flan-t5) and [BART](../model_doc/bart). 
Encoder-decoder-style models are typically used in generative tasks where the output **heavily** relies on the input, for example, in translation and summarization. The decoder-only models are used for all other types of generative tasks. When using a pipeline to generate text with an LLM, it's important to know what type of LLM you are using, because they use different pipelines. Run inference with decoder-only models with the `text-generation` pipeline: ```python >>> from transformers import pipeline >>> import torch >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> generator = pipeline('text-generation', model = 'openai-community/gpt2') >>> prompt = "Hello, I'm a language model" >>> generator(prompt, max_length = 30) [{'generated_text': "Hello, I'm a language model expert, so I'm a big believer in the concept that I know very well and then I try to look into"}] ``` To run inference with an encoder-decoder, use the `text2text-generation` pipeline: ```python >>> text2text_generator = pipeline("text2text-generation", model = 'google/flan-t5-base') >>> prompt = "Translate from English to French: I'm very happy to see you" >>> text2text_generator(prompt) [{'generated_text': 'Je suis très heureuse de vous rencontrer.'}] ``` ### Base vs instruct/chat models Most of the recent LLM checkpoints available on 🤗 Hub come in two versions: base and instruct (or chat). For example, [`tiiuae/falcon-7b`](https://huggingface.co/tiiuae/falcon-7b) and [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct). Base models are excellent at completing the text when given an initial prompt, however, they are not ideal for NLP tasks where they need to follow instructions, or for conversational use. This is where the instruct (chat) versions come in. These checkpoints are the result of further fine-tuning of the pre-trained base versions on instructions and conversational data. This additional fine-tuning makes them a better choice for many NLP tasks. Let's illustrate some simple prompts that you can use with [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct) to solve some common NLP tasks. ### NLP tasks First, let's set up the environment: ```bash pip install -q transformers accelerate ``` Next, let's load the model with the appropriate pipeline (`"text-generation"`): ```python >>> from transformers import pipeline, AutoTokenizer >>> import torch >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> model = "tiiuae/falcon-7b-instruct" >>> tokenizer = AutoTokenizer.from_pretrained(model) >>> pipe = pipeline( ... "text-generation", ... model=model, ... tokenizer=tokenizer, ... torch_dtype=torch.bfloat16, ... device_map="auto", ... ) ``` <Tip> Note that Falcon models were trained using the `bfloat16` datatype, so we recommend you use the same. This requires a recent version of CUDA and works best on modern cards. </Tip> Now that we have the model loaded via the pipeline, let's explore how you can use prompts to solve NLP tasks. #### Text classification One of the most common forms of text classification is sentiment analysis, which assigns a label like "positive", "negative", or "neutral" to a sequence of text. Let's write a prompt that instructs the model to classify a given text (a movie review). We'll start by giving the instruction, and then specifying the text to classify. 
Note that instead of leaving it at that, we're also adding the beginning of the response - `"Sentiment: "`: ```python >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> prompt = """Classify the text into neutral, negative or positive. ... Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. ... Sentiment: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Classify the text into neutral, negative or positive. Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. Sentiment: Positive ``` As a result, the output contains a classification label from the list we have provided in the instructions, and it is a correct one! <Tip> You may notice that in addition to the prompt, we pass a `max_new_tokens` parameter. It controls the number of tokens the model shall generate, and it is one of the many text generation parameters that you can learn about in [Text generation strategies](../generation_strategies) guide. </Tip> #### Named Entity Recognition Named Entity Recognition (NER) is a task of finding named entities in a piece of text, such as a person, location, or organization. Let's modify the instructions in the prompt to make the LLM perform this task. Here, let's also set `return_full_text = False` so that output doesn't contain the prompt: ```python >>> torch.manual_seed(1) # doctest: +IGNORE_RESULT >>> prompt = """Return a list of named entities in the text. ... Text: The Golden State Warriors are an American professional basketball team based in San Francisco. ... Named entities: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=15, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") - Golden State Warriors - San Francisco ``` As you can see, the model correctly identified two named entities from the given text. #### Translation Another task LLMs can perform is translation. You can choose to use encoder-decoder models for this task, however, here, for the simplicity of the examples, we'll keep using Falcon-7b-instruct, which does a decent job. Once again, here's how you can write a basic prompt to instruct a model to translate a piece of text from English to Italian: ```python >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT >>> prompt = """Translate the English text to Italian. ... Text: Sometimes, I've believed as many as six impossible things before breakfast. ... Translation: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=20, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") A volte, ho creduto a sei impossibili cose prima di colazione. ``` Here we've added a `do_sample=True` and `top_k=10` to allow the model to be a bit more flexible when generating output. #### Text summarization Similar to the translation, text summarization is another generative task where the output **heavily** relies on the input, and encoder-decoder models can be a better choice. However, decoder-style models can be used for this task as well. Previously, we have placed the instructions at the very beginning of the prompt. 
However, the very end of the prompt can also be a suitable location for instructions. Typically, it's better to place the instruction on one of the extreme ends. ```python >>> torch.manual_seed(3) # doctest: +IGNORE_RESULT >>> prompt = """Permaculture is a design process mimicking the diversity, functionality and resilience of natural ecosystems. The principles and practices are drawn from traditional ecological knowledge of indigenous cultures combined with modern scientific understanding and technological innovations. Permaculture design provides a framework helping individuals and communities develop innovative, creative and effective strategies for meeting basic needs while preparing for and mitigating the projected impacts of climate change. ... Write a summary of the above text. ... Summary: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=30, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") Permaculture is an ecological design mimicking natural ecosystems to meet basic needs and prepare for climate change. It is based on traditional knowledge and scientific understanding. ``` #### Question answering For question answering task we can structure the prompt into the following logical components: instructions, context, question, and the leading word or phrase (`"Answer:"`) to nudge the model to start generating the answer: ```python >>> torch.manual_seed(4) # doctest: +IGNORE_RESULT >>> prompt = """Answer the question using the context below. ... Context: Gazpacho is a cold soup and drink made of raw, blended vegetables. Most gazpacho includes stale bread, tomato, cucumbers, onion, bell peppers, garlic, olive oil, wine vinegar, water, and salt. Northern recipes often include cumin and/or pimentón (smoked sweet paprika). Traditionally, gazpacho was made by pounding the vegetables in a mortar with a pestle; this more laborious method is still sometimes used as it helps keep the gazpacho cool and avoids the foam and silky consistency of smoothie versions made in blenders or food processors. ... Question: What modern tool is used to make gazpacho? ... Answer: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Modern tools are used, such as immersion blenders ``` #### Reasoning Reasoning is one of the most difficult tasks for LLMs, and achieving good results often requires applying advanced prompting techniques, like [Chain-of-though](#chain-of-thought). Let's try if we can make a model reason about a simple arithmetics task with a basic prompt: ```python >>> torch.manual_seed(5) # doctest: +IGNORE_RESULT >>> prompt = """There are 5 groups of students in the class. Each group has 4 students. How many students are there in the class?""" >>> sequences = pipe( ... prompt, ... max_new_tokens=30, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: There are a total of 5 groups, so there are 5 x 4=20 students in the class. ``` Correct! Let's increase the complexity a little and see if we can still get away with a basic prompt: ```python >>> torch.manual_seed(6) # doctest: +IGNORE_RESULT >>> prompt = """I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor. My partner then bought 6 more muffins and ate 2. 
How many muffins do we now have?""" >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: The total number of muffins now is 21 ``` This is a wrong answer, it should be 12. In this case, this can be due to the prompt being too basic, or due to the choice of model, after all we've picked the smallest version of Falcon. Reasoning is difficult for models of all sizes, but larger models are likely to perform better. ## Best practices of LLM prompting In this section of the guide we have compiled a list of best practices that tend to improve the prompt results: * When choosing the model to work with, the latest and most capable models are likely to perform better. * Start with a simple and short prompt, and iterate from there. * Put the instructions at the beginning of the prompt, or at the very end. When working with large context, models apply various optimizations to prevent Attention complexity from scaling quadratically. This may make a model more attentive to the beginning or end of a prompt than the middle. * Clearly separate instructions from the text they apply to - more on this in the next section. * Be specific and descriptive about the task and the desired outcome - its format, length, style, language, etc. * Avoid ambiguous descriptions and instructions. * Favor instructions that say "what to do" instead of those that say "what not to do". * "Lead" the output in the right direction by writing the first word (or even begin the first sentence for the model). * Use advanced techniques like [Few-shot prompting](#few-shot-prompting) and [Chain-of-thought](#chain-of-thought) * Test your prompts with different models to assess their robustness. * Version and track the performance of your prompts. ## Advanced prompting techniques ### Few-shot prompting The basic prompts in the sections above are the examples of "zero-shot" prompts, meaning, the model has been given instructions and context, but no examples with solutions. LLMs that have been fine-tuned on instruction datasets, generally perform well on such "zero-shot" tasks. However, you may find that your task has more complexity or nuance, and, perhaps, you have some requirements for the output that the model doesn't catch on just from the instructions. In this case, you can try the technique called few-shot prompting. In few-shot prompting, we provide examples in the prompt giving the model more context to improve the performance. The examples condition the model to generate the output following the patterns in the examples. Here's an example: ```python >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> prompt = """Text: The first human went into space and orbited the Earth on April 12, 1961. ... Date: 04/12/1961 ... Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon. ... Date:""" >>> sequences = pipe( ... prompt, ... max_new_tokens=8, ... do_sample=True, ... top_k=10, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Text: The first human went into space and orbited the Earth on April 12, 1961. Date: 04/12/1961 Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon. 
Date: 09/28/1960 ``` In the above code snippet we used a single example to demonstrate the desired output to the model, so this can be called a "one-shot" prompting. However, depending on the task complexity you may need to use more than one example. Limitations of the few-shot prompting technique: - While LLMs can pick up on the patterns in the examples, these technique doesn't work well on complex reasoning tasks - Few-shot prompting requires creating lengthy prompts. Prompts with large number of tokens can increase computation and latency. There's also a limit to the length of the prompts. - Sometimes when given a number of examples, models can learn patterns that you didn't intend them to learn, e.g. that the third movie review is always negative. ### Chain-of-thought Chain-of-thought (CoT) prompting is a technique that nudges a model to produce intermediate reasoning steps thus improving the results on complex reasoning tasks. There are two ways of steering a model to producing the reasoning steps: - few-shot prompting by illustrating examples with detailed answers to questions, showing the model how to work through a problem. - by instructing the model to reason by adding phrases like "Let's think step by step" or "Take a deep breath and work through the problem step by step." If we apply the CoT technique to the muffins example from the [reasoning section](#reasoning) and use a larger model, such as (`tiiuae/falcon-180B-chat`) which you can play with in the [HuggingChat](https://huggingface.co/chat/), we'll get a significant improvement on the reasoning result: ```text Let's go through this step-by-step: 1. You start with 15 muffins. 2. You eat 2 muffins, leaving you with 13 muffins. 3. You give 5 muffins to your neighbor, leaving you with 8 muffins. 4. Your partner buys 6 more muffins, bringing the total number of muffins to 14. 5. Your partner eats 2 muffins, leaving you with 12 muffins. Therefore, you now have 12 muffins. ``` ## Prompting vs fine-tuning You can achieve great results by optimizing your prompts, however, you may still ponder whether fine-tuning a model would work better for your case. Here are some scenarios when fine-tuning a smaller model may be a preferred option: - Your domain is wildly different from what LLMs were pre-trained on and extensive prompt optimization did not yield sufficient results. - You need your model to work well in a low-resource language. - You need the model to be trained on sensitive data that is under strict regulations. - You have to use a small model due to cost, privacy, infrastructure or other limitations. In all of the above examples, you will need to make sure that you either already have or can easily obtain a large enough domain-specific dataset at a reasonable cost to fine-tune a model. You will also need to have enough time and resources to fine-tune a model. If the above examples are not the case for you, optimizing prompts can prove to be more beneficial.
transformers/docs/source/en/tasks/prompting.md/0
{ "file_path": "transformers/docs/source/en/tasks/prompting.md", "repo_id": "transformers", "token_count": 5573 }
251
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Summary of the tokenizers [[open-in-colab]] On this page, we will have a closer look at tokenization. <Youtube id="VFp38yj8h3A"/> As we saw in [the preprocessing tutorial](preprocessing), tokenizing a text is splitting it into words or subwords, which then are converted to ids through a look-up table. Converting words or subwords to ids is straightforward, so in this summary, we will focus on splitting a text into words or subwords (i.e. tokenizing a text). More specifically, we will look at the three main types of tokenizers used in 🤗 Transformers: [Byte-Pair Encoding (BPE)](#byte-pair-encoding), [WordPiece](#wordpiece), and [SentencePiece](#sentencepiece), and show examples of which tokenizer type is used by which model. Note that on each model page, you can look at the documentation of the associated tokenizer to know which tokenizer type was used by the pretrained model. For instance, if we look at [`BertTokenizer`], we can see that the model uses [WordPiece](#wordpiece). ## Introduction Splitting a text into smaller chunks is a task that is harder than it looks, and there are multiple ways of doing so. For instance, let's look at the sentence `"Don't you love 🤗 Transformers? We sure do."` <Youtube id="nhJxYji1aho"/> A simple way of tokenizing this text is to split it by spaces, which would give: ``` ["Don't", "you", "love", "🤗", "Transformers?", "We", "sure", "do."] ``` This is a sensible first step, but if we look at the tokens `"Transformers?"` and `"do."`, we notice that the punctuation is attached to the words `"Transformer"` and `"do"`, which is suboptimal. We should take the punctuation into account so that a model does not have to learn a different representation of a word and every possible punctuation symbol that could follow it, which would explode the number of representations the model has to learn. Taking punctuation into account, tokenizing our exemplary text would give: ``` ["Don", "'", "t", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] ``` Better. However, it is disadvantageous, how the tokenization dealt with the word `"Don't"`. `"Don't"` stands for `"do not"`, so it would be better tokenized as `["Do", "n't"]`. This is where things start getting complicated, and part of the reason each model has its own tokenizer type. Depending on the rules we apply for tokenizing a text, a different tokenized output is generated for the same text. A pretrained model only performs properly if you feed it an input that was tokenized with the same rules that were used to tokenize its training data. [spaCy](https://spacy.io/) and [Moses](http://www.statmt.org/moses/?n=Development.GetStarted) are two popular rule-based tokenizers. 
Applying them on our example, *spaCy* and *Moses* would output something like: ``` ["Do", "n't", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] ``` As can be seen space and punctuation tokenization, as well as rule-based tokenization, is used here. Space and punctuation tokenization and rule-based tokenization are both examples of word tokenization, which is loosely defined as splitting sentences into words. While it's the most intuitive way to split texts into smaller chunks, this tokenization method can lead to problems for massive text corpora. In this case, space and punctuation tokenization usually generates a very big vocabulary (the set of all unique words and tokens used). *E.g.*, [Transformer XL](model_doc/transformerxl) uses space and punctuation tokenization, resulting in a vocabulary size of 267,735! Such a big vocabulary size forces the model to have an enormous embedding matrix as the input and output layer, which causes both an increased memory and time complexity. In general, transformers models rarely have a vocabulary size greater than 50,000, especially if they are pretrained only on a single language. So if simple space and punctuation tokenization is unsatisfactory, why not simply tokenize on characters? <Youtube id="ssLq_EK2jLE"/> While character tokenization is very simple and would greatly reduce memory and time complexity it makes it much harder for the model to learn meaningful input representations. *E.g.* learning a meaningful context-independent representation for the letter `"t"` is much harder than learning a context-independent representation for the word `"today"`. Therefore, character tokenization is often accompanied by a loss of performance. So to get the best of both worlds, transformers models use a hybrid between word-level and character-level tokenization called **subword** tokenization. ## Subword tokenization <Youtube id="zHvTiHr506c"/> Subword tokenization algorithms rely on the principle that frequently used words should not be split into smaller subwords, but rare words should be decomposed into meaningful subwords. For instance `"annoyingly"` might be considered a rare word and could be decomposed into `"annoying"` and `"ly"`. Both `"annoying"` and `"ly"` as stand-alone subwords would appear more frequently while at the same time the meaning of `"annoyingly"` is kept by the composite meaning of `"annoying"` and `"ly"`. This is especially useful in agglutinative languages such as Turkish, where you can form (almost) arbitrarily long complex words by stringing together subwords. Subword tokenization allows the model to have a reasonable vocabulary size while being able to learn meaningful context-independent representations. In addition, subword tokenization enables the model to process words it has never seen before, by decomposing them into known subwords. For instance, the [`~transformers.BertTokenizer`] tokenizes `"I have a new GPU!"` as follows: ```py >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> tokenizer.tokenize("I have a new GPU!") ["i", "have", "a", "new", "gp", "##u", "!"] ``` Because we are considering the uncased model, the sentence was lowercased first. We can see that the words `["i", "have", "a", "new"]` are present in the tokenizer's vocabulary, but the word `"gpu"` is not. Consequently, the tokenizer splits `"gpu"` into known subwords: `["gp" and "##u"]`. 
`"##"` means that the rest of the token should be attached to the previous one, without space (for decoding or reversal of the tokenization). As another example, [`~transformers.XLNetTokenizer`] tokenizes our previously exemplary text as follows: ```py >>> from transformers import XLNetTokenizer >>> tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased") >>> tokenizer.tokenize("Don't you love 🤗 Transformers? We sure do.") ["▁Don", "'", "t", "▁you", "▁love", "▁", "🤗", "▁", "Transform", "ers", "?", "▁We", "▁sure", "▁do", "."] ``` We'll get back to the meaning of those `"▁"` when we look at [SentencePiece](#sentencepiece). As one can see, the rare word `"Transformers"` has been split into the more frequent subwords `"Transform"` and `"ers"`. Let's now look at how the different subword tokenization algorithms work. Note that all of those tokenization algorithms rely on some form of training which is usually done on the corpus the corresponding model will be trained on. <a id='byte-pair-encoding'></a> ### Byte-Pair Encoding (BPE) Byte-Pair Encoding (BPE) was introduced in [Neural Machine Translation of Rare Words with Subword Units (Sennrich et al., 2015)](https://arxiv.org/abs/1508.07909). BPE relies on a pre-tokenizer that splits the training data into words. Pretokenization can be as simple as space tokenization, e.g. [GPT-2](model_doc/gpt2), [RoBERTa](model_doc/roberta). More advanced pre-tokenization include rule-based tokenization, e.g. [XLM](model_doc/xlm), [FlauBERT](model_doc/flaubert) which uses Moses for most languages, or [GPT](model_doc/gpt) which uses spaCy and ftfy, to count the frequency of each word in the training corpus. After pre-tokenization, a set of unique words has been created and the frequency with which each word occurred in the training data has been determined. Next, BPE creates a base vocabulary consisting of all symbols that occur in the set of unique words and learns merge rules to form a new symbol from two symbols of the base vocabulary. It does so until the vocabulary has attained the desired vocabulary size. Note that the desired vocabulary size is a hyperparameter to define before training the tokenizer. As an example, let's assume that after pre-tokenization, the following set of words including their frequency has been determined: ``` ("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5) ``` Consequently, the base vocabulary is `["b", "g", "h", "n", "p", "s", "u"]`. Splitting all words into symbols of the base vocabulary, we obtain: ``` ("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5) ``` BPE then counts the frequency of each possible symbol pair and picks the symbol pair that occurs most frequently. In the example above `"h"` followed by `"u"` is present _10 + 5 = 15_ times (10 times in the 10 occurrences of `"hug"`, 5 times in the 5 occurrences of `"hugs"`). However, the most frequent symbol pair is `"u"` followed by `"g"`, occurring _10 + 5 + 5 = 20_ times in total. Thus, the first merge rule the tokenizer learns is to group all `"u"` symbols followed by a `"g"` symbol together. Next, `"ug"` is added to the vocabulary. The set of words then becomes ``` ("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5) ``` BPE then identifies the next most common symbol pair. It's `"u"` followed by `"n"`, which occurs 16 times. `"u"`, `"n"` is merged to `"un"` and added to the vocabulary. 
The next most frequent symbol pair is `"h"` followed by `"ug"`, occurring _10 + 5 = 15_ times. Again, the pair is merged and `"hug"` can be added to the vocabulary. At this stage, the vocabulary is `["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]` and our set of unique words is represented as ``` ("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5) ``` Assuming that the Byte-Pair Encoding training stops at this point, the learned merge rules would then be applied to new words (as long as those new words do not include symbols that were not in the base vocabulary). For instance, the word `"bug"` would be tokenized to `["b", "ug"]` but `"mug"` would be tokenized as `["<unk>", "ug"]` since the symbol `"m"` is not in the base vocabulary. In general, single letters such as `"m"` are not replaced by the `"<unk>"` symbol because the training data usually includes at least one occurrence of each letter, but it is likely to happen for very special characters like emojis. As mentioned earlier, the vocabulary size, *i.e.* the base vocabulary size + the number of merges, is a hyperparameter to choose. For instance, [GPT](model_doc/gpt) has a vocabulary size of 40,478 since they have 478 base characters and chose to stop training after 40,000 merges. #### Byte-level BPE A base vocabulary that includes all possible base characters can be quite large if *e.g.* all unicode characters are considered as base characters. To have a better base vocabulary, [GPT-2](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) uses bytes as the base vocabulary, which is a clever trick to force the base vocabulary to be of size 256 while ensuring that every base character is included in the vocabulary. With some additional rules to deal with punctuation, the GPT-2 tokenizer can tokenize every text without the need for the `<unk>` symbol. [GPT-2](model_doc/gpt2) has a vocabulary size of 50,257, which corresponds to the 256 byte-level base tokens, a special end-of-text token, and the symbols learned with 50,000 merges. <a id='wordpiece'></a> ### WordPiece WordPiece is the subword tokenization algorithm used for [BERT](model_doc/bert), [DistilBERT](model_doc/distilbert), and [Electra](model_doc/electra). The algorithm was outlined in [Japanese and Korean Voice Search (Schuster et al., 2012)](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) and is very similar to BPE. WordPiece first initializes the vocabulary to include every character present in the training data and progressively learns a given number of merge rules. In contrast to BPE, WordPiece does not choose the most frequent symbol pair, but the one that maximizes the likelihood of the training data once added to the vocabulary. So what does this mean exactly? Referring to the previous example, maximizing the likelihood of the training data is equivalent to finding the symbol pair whose probability, divided by the probabilities of its first and second symbol, is the greatest among all symbol pairs. *E.g.* `"u"` followed by `"g"` would only have been merged if the probability of `"ug"` divided by the probabilities of `"u"` and `"g"` had been greater than for any other symbol pair. Intuitively, WordPiece is slightly different from BPE in that it evaluates what it _loses_ by merging two symbols to ensure it's _worth it_.
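To make the merge-learning procedure above more concrete, here is a small, self-contained Python sketch that re-runs the toy example from this section. It picks the pair to merge by raw frequency, as BPE does, and also prints a WordPiece-style score (pair frequency divided by the product of the single-symbol frequencies) for comparison. The helper functions and the frequency-based score are simplifications made purely for illustration; they are not the actual implementation used by any tokenizer in the library.

```python
from collections import Counter

# Toy corpus from this section: each pre-tokenized word with its frequency.
word_freqs = {"hug": 10, "pug": 5, "pun": 12, "bun": 4, "hugs": 5}

# Start from the base vocabulary: every word is a sequence of single characters.
splits = {word: list(word) for word in word_freqs}

def pair_stats(splits, word_freqs):
    """Count symbol and adjacent-pair occurrences, weighted by word frequency."""
    pair_freqs, symbol_freqs = Counter(), Counter()
    for word, freq in word_freqs.items():
        symbols = splits[word]
        for symbol in symbols:
            symbol_freqs[symbol] += freq
        for a, b in zip(symbols, symbols[1:]):
            pair_freqs[(a, b)] += freq
    return pair_freqs, symbol_freqs

def merge_pair(pair, splits):
    """Apply one merge rule to every word."""
    a, b = pair
    for word, symbols in splits.items():
        i, merged = 0, []
        while i < len(symbols):
            if i < len(symbols) - 1 and symbols[i] == a and symbols[i + 1] == b:
                merged.append(a + b)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        splits[word] = merged
    return splits

for step in range(3):  # three merges reproduce "ug", "un" and "hug"
    pair_freqs, symbol_freqs = pair_stats(splits, word_freqs)
    best_pair = max(pair_freqs, key=pair_freqs.get)  # BPE: pick the most frequent pair
    # WordPiece would instead score each pair by freq(pair) / (freq(first) * freq(second))
    wordpiece_score = pair_freqs[best_pair] / (symbol_freqs[best_pair[0]] * symbol_freqs[best_pair[1]])
    print(f"merge {step + 1}: BPE picks {best_pair} (count {pair_freqs[best_pair]}, WordPiece-style score {wordpiece_score:.4f})")
    splits = merge_pair(best_pair, splits)

print(splits)  # {'hug': ['hug'], 'pug': ['p', 'ug'], 'pun': ['p', 'un'], 'bun': ['b', 'un'], 'hugs': ['hug', 's']}
```

Running the sketch reproduces the three merges discussed above, `"ug"`, `"un"` and `"hug"`, and ends with `"hugs"` split into `["hug", "s"]`.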
<a id='unigram'></a> ### Unigram Unigram is a subword tokenization algorithm introduced in [Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates (Kudo, 2018)](https://arxiv.org/pdf/1804.10959.pdf). In contrast to BPE or WordPiece, Unigram initializes its base vocabulary to a large number of symbols and progressively trims it down to obtain a smaller vocabulary. The base vocabulary could for instance correspond to all pre-tokenized words and the most common substrings. Unigram is not used directly for any of the models in 🤗 Transformers, but it's used in conjunction with [SentencePiece](#sentencepiece). At each training step, the Unigram algorithm defines a loss (often defined as the negative log-likelihood) over the training data given the current vocabulary and a unigram language model. Then, for each symbol in the vocabulary, the algorithm computes how much the overall loss would increase if the symbol was to be removed from the vocabulary. Unigram then removes p percent (with p usually being 10% or 20%) of the symbols whose loss increase is the lowest, *i.e.* those symbols that least affect the overall loss over the training data. This process is repeated until the vocabulary has reached the desired size. The Unigram algorithm always keeps the base characters so that any word can be tokenized. Because Unigram is not based on merge rules (in contrast to BPE and WordPiece), the algorithm has several ways of tokenizing new text after training. As an example, if a trained Unigram tokenizer has the vocabulary: ``` ["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"], ``` `"hugs"` could be tokenized as `["hug", "s"]`, `["h", "ug", "s"]`, or `["h", "u", "g", "s"]`. So which one should the tokenizer choose? Unigram saves the probability of each token in the training corpus on top of saving the vocabulary so that the probability of each possible tokenization can be computed after training. The algorithm simply picks the most likely tokenization in practice, but also offers the possibility to sample a tokenization according to its probability. Those probabilities are defined by the loss the tokenizer is trained on. Assuming that the training data consists of the words \\(x_{1}, \dots, x_{N}\\) and that the set of all possible tokenizations for a word \\(x_{i}\\) is defined as \\(S(x_{i})\\), then the overall loss is defined as $$\mathcal{L} = -\sum_{i=1}^{N} \log \left ( \sum_{x \in S(x_{i})} p(x) \right )$$ <a id='sentencepiece'></a> ### SentencePiece All tokenization algorithms described so far have the same problem: It is assumed that the input text uses spaces to separate words. However, not all languages use spaces to separate words. One possible solution is to use language-specific pre-tokenizers (*e.g.* [XLM](model_doc/xlm) uses a specific Chinese, Japanese, and Thai pre-tokenizer). To solve this problem more generally, [SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing (Kudo et al., 2018)](https://arxiv.org/pdf/1808.06226.pdf) treats the input as a raw input stream, thus including the space in the set of characters to use. It then uses the BPE or unigram algorithm to construct the appropriate vocabulary. The [`XLNetTokenizer`], for example, uses SentencePiece, which is also why the `"▁"` character was included in the example earlier. Decoding with SentencePiece is very easy since all tokens can just be concatenated and `"▁"` is replaced by a space.
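As a quick sanity check of that last point, the snippet below re-tokenizes the example sentence from earlier with the [`XLNetTokenizer`] and undoes the tokenization by concatenating the tokens and replacing `"▁"` with a space; the tokenizer's own `convert_tokens_to_string` method implements the same logic. The exact tokens, and therefore the round-trip, can vary slightly from one checkpoint to another, so treat this as an illustrative sketch.

```py
>>> from transformers import XLNetTokenizer

>>> tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased")
>>> tokens = tokenizer.tokenize("Don't you love 🤗 Transformers? We sure do.")
>>> "".join(tokens).replace("▁", " ").strip()
"Don't you love 🤗 Transformers? We sure do."
>>> tokenizer.convert_tokens_to_string(tokens)
"Don't you love 🤗 Transformers? We sure do."
```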
All transformers models in the library that use SentencePiece use it in combination with unigram. Examples of models using SentencePiece are [ALBERT](model_doc/albert), [XLNet](model_doc/xlnet), [Marian](model_doc/marian), and [T5](model_doc/t5).
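Tokenizers of this family therefore store the learned unigram probabilities alongside the vocabulary. To round off the Unigram discussion with a worked example, the following sketch scores every segmentation of `"hugs"` under a small, made-up probability table; the numbers are assumptions chosen purely for illustration, and real implementations use dynamic programming rather than brute-force enumeration.

```python
import math
from itertools import combinations

# Hypothetical unigram probabilities for the toy vocabulary used in this section.
probs = {"h": 0.04, "u": 0.05, "g": 0.03, "s": 0.06, "hu": 0.02, "ug": 0.07, "hug": 0.1}

def segmentations(word):
    """Yield every way of splitting `word` into contiguous, non-empty pieces."""
    n = len(word)
    for k in range(n):
        for cuts in combinations(range(1, n), k):
            bounds = (0, *cuts, n)
            yield [word[i:j] for i, j in zip(bounds, bounds[1:])]

def score(tokens):
    """Probability of a tokenization under a unigram language model."""
    return math.prod(probs.get(token, 0.0) for token in tokens)

candidates = [(seg, score(seg)) for seg in segmentations("hugs") if score(seg) > 0]
for seg, p in sorted(candidates, key=lambda pair: -pair[1]):
    print(seg, f"{p:.6g}")
# ['hug', 's'] scores highest (0.1 * 0.06 = 0.006), so a Unigram tokenizer would pick ["hug", "s"];
# ['h', 'ug', 's'], ['hu', 'g', 's'] and ['h', 'u', 'g', 's'] follow with much lower probability.
```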
transformers/docs/source/en/tokenizer_summary.md/0
{ "file_path": "transformers/docs/source/en/tokenizer_summary.md", "repo_id": "transformers", "token_count": 4933 }
252
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Compartir modelos personalizados La biblioteca 🤗 Transformers está diseñada para ser fácilmente ampliable. Cada modelo está completamente codificado sin abstracción en una subcarpeta determinada del repositorio, por lo que puedes copiar fácilmente un archivo del modelo y ajustarlo según tus necesidades. Si estás escribiendo un modelo completamente nuevo, podría ser más fácil comenzar desde cero. En este tutorial, te mostraremos cómo escribir un modelo personalizado y su configuración para que pueda usarse dentro de Transformers, y cómo puedes compartirlo con la comunidad (con el código en el que se basa) para que cualquiera pueda usarlo, incluso si no está presente en la biblioteca 🤗 Transformers. Ilustraremos todo esto con un modelo ResNet, envolviendo la clase ResNet de la [biblioteca timm](https://github.com/rwightman/pytorch-image-models) en un [`PreTrainedModel`]. ## Escribir una configuración personalizada Antes de adentrarnos en el modelo, primero escribamos su configuración. La configuración de un modelo es un objeto que contendrá toda la información necesaria para construir el modelo. Como veremos en la siguiente sección, el modelo solo puede tomar un `config` para ser inicializado, por lo que realmente necesitamos que ese objeto esté lo más completo posible. En nuestro ejemplo, tomaremos un par de argumentos de la clase ResNet que tal vez queramos modificar. Las diferentes configuraciones nos darán los diferentes tipos de ResNet que son posibles. Luego simplemente almacenamos esos argumentos después de verificar la validez de algunos de ellos. ```python from transformers import PretrainedConfig from typing import List class ResnetConfig(PretrainedConfig): model_type = "resnet" def __init__( self, block_type="bottleneck", layers: List[int] = [3, 4, 6, 3], num_classes: int = 1000, input_channels: int = 3, cardinality: int = 1, base_width: int = 64, stem_width: int = 64, stem_type: str = "", avg_down: bool = False, **kwargs, ): if block_type not in ["basic", "bottleneck"]: raise ValueError(f"`block_type` must be 'basic' or bottleneck', got {block_type}.") if stem_type not in ["", "deep", "deep-tiered"]: raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.") self.block_type = block_type self.layers = layers self.num_classes = num_classes self.input_channels = input_channels self.cardinality = cardinality self.base_width = base_width self.stem_width = stem_width self.stem_type = stem_type self.avg_down = avg_down super().__init__(**kwargs) ``` Las tres cosas importantes que debes recordar al escribir tu propia configuración son las siguientes: - tienes que heredar de `PretrainedConfig`, - el `__init__` de tu `PretrainedConfig` debe aceptar cualquier `kwargs`, - esos `kwargs` deben pasarse a la superclase `__init__`. 
La herencia es para asegurarte de obtener toda la funcionalidad de la biblioteca 🤗 Transformers, mientras que las otras dos restricciones provienen del hecho de que una `PretrainedConfig` tiene más campos que los que estás configurando. Al recargar una `config` con el método `from_pretrained`, esos campos deben ser aceptados por tu `config` y luego enviados a la superclase. Definir un `model_type` para tu configuración (en este caso `model_type="resnet"`) no es obligatorio, a menos que quieras registrar tu modelo con las clases automáticas (ver la última sección). Una vez hecho esto, puedes crear y guardar fácilmente tu configuración como lo harías con cualquier otra configuración de un modelo de la biblioteca. Así es como podemos crear una configuración resnet50d y guardarla: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d_config.save_pretrained("custom-resnet") ``` Esto guardará un archivo llamado `config.json` dentro de la carpeta `custom-resnet`. Luego puedes volver a cargar tu configuración con el método `from_pretrained`: ```py resnet50d_config = ResnetConfig.from_pretrained("custom-resnet") ``` También puedes usar cualquier otro método de la clase [`PretrainedConfig`], como [`~PretrainedConfig.push_to_hub`], para cargar directamente tu configuración en el Hub. ## Escribir un modelo personalizado Ahora que tenemos nuestra configuración de ResNet, podemos seguir escribiendo el modelo. En realidad escribiremos dos: una que extrae las características ocultas de un grupo de imágenes (como [`BertModel`]) y una que es adecuada para clasificación de imagenes (como [`BertForSequenceClassification`]). Como mencionamos antes, solo escribiremos un envoltura (_wrapper_) libre del modelo para simplificar este ejemplo. Lo único que debemos hacer antes de escribir esta clase es un mapeo entre los tipos de bloques y las clases de bloques reales. 
Luego se define el modelo desde la configuración pasando todo a la clase `ResNet`: ```py from transformers import PreTrainedModel from timm.models.resnet import BasicBlock, Bottleneck, ResNet from .configuration_resnet import ResnetConfig BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck} class ResnetModel(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor): return self.model.forward_features(tensor) ``` Para el modelo que clasificará las imágenes, solo cambiamos el método de avance (es decir, el método `forward`): ```py import torch class ResnetModelForImageClassification(PreTrainedModel): config_class = ResnetConfig def __init__(self, config): super().__init__(config) block_layer = BLOCK_MAPPING[config.block_type] self.model = ResNet( block_layer, config.layers, num_classes=config.num_classes, in_chans=config.input_channels, cardinality=config.cardinality, base_width=config.base_width, stem_width=config.stem_width, stem_type=config.stem_type, avg_down=config.avg_down, ) def forward(self, tensor, labels=None): logits = self.model(tensor) if labels is not None: loss = torch.nn.functional.cross_entropy(logits, labels) return {"loss": loss, "logits": logits} return {"logits": logits} ``` En ambos casos, observa cómo heredamos de `PreTrainedModel` y llamamos a la inicialización de la superclase con `config` (un poco como cuando escribes `torch.nn.Module`). La línea que establece `config_class` no es obligatoria, a menos que quieras registrar tu modelo con las clases automáticas (consulta la última sección). <Tip> Si tu modelo es muy similar a un modelo dentro de la biblioteca, puedes reutilizar la misma configuración de ese modelo. </Tip> Puedes hacer que tu modelo devuelva lo que quieras, pero devolver un diccionario como lo hicimos para `ResnetModelForImageClassification`, con el `loss` incluido cuando se pasan las etiquetas, hará que tu modelo se pueda usar directamente dentro de la clase [`Trainer`]. Usar otro formato de salida está bien, siempre y cuando estés planeando usar tu propio bucle de entrenamiento u otra biblioteca para el entrenamiento. Ahora que tenemos nuestra clase, vamos a crear un modelo: ```py resnet50d = ResnetModelForImageClassification(resnet50d_config) ``` Nuevamente, puedes usar cualquiera de los métodos de [`PreTrainedModel`], como [`~PreTrainedModel.save_pretrained`] o [`~PreTrainedModel.push_to_hub`]. Usaremos el segundo en la siguiente sección y veremos cómo pasar los pesos del modelo con el código de nuestro modelo. Pero primero, carguemos algunos pesos previamente entrenados dentro de nuestro modelo. En tu caso de uso, probablemente estarás entrenando tu modelo personalizado con tus propios datos. Para ir rápido en este tutorial, usaremos la versión preentrenada de resnet50d.
Dado que nuestro modelo es solo un envoltorio alrededor del resnet50d original, será fácil transferir esos pesos: ```py import timm pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` Ahora veamos cómo asegurarnos de que cuando hacemos [`~PreTrainedModel.save_pretrained`] o [`~PreTrainedModel.push_to_hub`], se guarda el código del modelo. ## Enviar el código al _Hub_ <Tip warning={true}> Esta _API_ es experimental y puede tener algunos cambios leves en las próximas versiones. </Tip> Primero, asegúrate de que tu modelo esté completamente definido en un archivo `.py`. Puedes basarte en importaciones relativas a otros archivos, siempre que todos los archivos estén en el mismo directorio (aún no admitimos submódulos para esta característica). Para nuestro ejemplo, definiremos un archivo `modeling_resnet.py` y un archivo `configuration_resnet.py` en una carpeta del directorio de trabajo actual llamado `resnet_model`. El archivo de configuración contiene el código de `ResnetConfig` y el archivo del modelo contiene el código de `ResnetModel` y `ResnetModelForImageClassification`. ``` . └── resnet_model ├── __init__.py ├── configuration_resnet.py └── modeling_resnet.py ``` El `__init__.py` puede estar vacío, solo está ahí para que Python detecte que `resnet_model` se puede usar como un módulo. <Tip warning={true}> Si copias archivos del modelo desde la biblioteca, deberás reemplazar todas las importaciones relativas en la parte superior del archivo para importarlos desde el paquete `transformers`. </Tip> Ten en cuenta que puedes reutilizar (o subclasificar) una configuración o modelo existente. Para compartir tu modelo con la comunidad, sigue estos pasos: primero importa el modelo y la configuración de ResNet desde los archivos recién creados: ```py from resnet_model.configuration_resnet import ResnetConfig from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification ``` Luego, debes decirle a la biblioteca que deseas copiar el código de esos objetos cuando usas el método `save_pretrained` y registrarlos correctamente con una determinada clase automática (especialmente para modelos), simplemente ejecuta: ```py ResnetConfig.register_for_auto_class() ResnetModel.register_for_auto_class("AutoModel") ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification") ``` Ten en cuenta que no es necesario especificar una clase automática para la configuración (solo hay una clase automática para ellos, [`AutoConfig`]), pero es diferente para los modelos. Tu modelo personalizado podría ser adecuado para muchas tareas diferentes, por lo que debes especificar cuál de las clases automáticas es la correcta para tu modelo. A continuación, vamos a crear la configuración y los modelos como lo hicimos antes: ```py resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True) resnet50d = ResnetModelForImageClassification(resnet50d_config) pretrained_model = timm.create_model("resnet50d", pretrained=True) resnet50d.model.load_state_dict(pretrained_model.state_dict()) ``` Ahora, para enviar el modelo al Hub, asegúrate de haber iniciado sesión. 
Ejecuta en tu terminal: ```bash huggingface-cli login ``` o desde un _notebook_: ```py from huggingface_hub import notebook_login notebook_login() ``` Luego puedes ingresar a tu propio espacio (o una organización de la que seas miembro) de esta manera: ```py resnet50d.push_to_hub("custom-resnet50d") ``` Además de los pesos del modelo y la configuración en formato json, esto también copió los archivos `.py` del modelo y la configuración en la carpeta `custom-resnet50d` y subió el resultado al Hub. Puedes verificar el resultado en este [repositorio de modelos](https://huggingface.co/sgugger/custom-resnet50d). Consulta el tutorial sobre cómo [compartir modelos](model_sharing) para obtener más información sobre el método para subir modelos al Hub. ## Usar un modelo con código personalizado Puedes usar cualquier configuración, modelo o _tokenizador_ con archivos de código personalizado en tu repositorio con las clases automáticas y el método `from_pretrained`. Todos los archivos y códigos cargados en el Hub se analizan en busca de malware (consulta la documentación de [seguridad del Hub](https://huggingface.co/docs/hub/security#malware-scanning) para obtener más información), pero aún debes revisar el código del modelo y el autor para evitar la ejecución de código malicioso en tu computadora. Configura `trust_remote_code=True` para usar un modelo con código personalizado: ```py from transformers import AutoModelForImageClassification model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True) ``` También se recomienda encarecidamente pasar un _hash_ de confirmación como una "revisión" para asegurarte de que el autor de los modelos no actualizó el código con algunas líneas nuevas maliciosas (a menos que confíes plenamente en los autores de los modelos). ```py commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292" model = AutoModelForImageClassification.from_pretrained( "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash ) ``` Ten en cuenta que al navegar por el historial de confirmaciones del repositorio del modelo en Hub, hay un botón para copiar fácilmente el hash de confirmación de cualquier _commit_. ## Registrar un model con código personalizado a las clases automáticas Si estás escribiendo una biblioteca que amplía 🤗 Transformers, es posible que quieras ampliar las clases automáticas para incluir tu propio modelo. Esto es diferente de enviar el código al Hub en el sentido de que los usuarios necesitarán importar tu biblioteca para obtener los modelos personalizados (al contrario de descargar automáticamente el código del modelo desde Hub). Siempre que tu configuración tenga un atributo `model_type` que sea diferente de los tipos de modelos existentes, y que tus clases modelo tengan los atributos `config_class` correctos, puedes agregarlos a las clases automáticas de la siguiente manera: ```py from transformers import AutoConfig, AutoModel, AutoModelForImageClassification AutoConfig.register("resnet", ResnetConfig) AutoModel.register(ResnetConfig, ResnetModel) AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification) ``` Ten en cuenta que el primer argumento utilizado al registrar tu configuración personalizada en [`AutoConfig`] debe coincidir con el `model_type` de tu configuración personalizada, y el primer argumento utilizado al registrar tus modelos personalizados en cualquier clase del modelo automático debe coincidir con el `config_class ` de esos modelos.
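Una vez registradas las clases, las clases automáticas devolverán tus clases personalizadas al cargar la configuración o el modelo. A modo de ilustración, este es un pequeño ejemplo hipotético que asume que antes guardaste la configuración y los pesos en la carpeta local `custom-resnet50d` con `save_pretrained`:

```py
from transformers import AutoConfig, AutoModelForImageClassification

# Gracias a AutoConfig.register("resnet", ResnetConfig), esto devuelve una instancia de ResnetConfig
config = AutoConfig.from_pretrained("custom-resnet50d")

# Y gracias al registro del modelo, esto devuelve una instancia de ResnetModelForImageClassification
model = AutoModelForImageClassification.from_pretrained("custom-resnet50d")
```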
transformers/docs/source/es/custom_models.md/0
{ "file_path": "transformers/docs/source/es/custom_models.md", "repo_id": "transformers", "token_count": 5983 }
253
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Entrenamiento con scripts Junto con los [notebooks](./noteboks/README) de 🤗 Transformers, también hay scripts con ejemplos que muestran cómo entrenar un modelo para una tarea en [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), o [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). También encontrarás scripts que hemos usado en nuestros [proyectos de investigación](https://github.com/huggingface/transformers/tree/main/examples/research_projects) y [ejemplos pasados](https://github.com/huggingface/transformers/tree/main/examples/legacy) que en su mayoría son aportados por la comunidad. Estos scripts no se mantienen activamente y requieren una versión específica de 🤗 Transformers que probablemente sea incompatible con la última versión de la biblioteca. No se espera que los scripts de ejemplo funcionen de inmediato en todos los problemas, y es posible que debas adaptar el script al problema que estás tratando de resolver. Para ayudarte con esto, la mayoría de los scripts exponen completamente cómo se preprocesan los datos, lo que te permite editarlos según sea necesario para tu caso de uso. Para cualquier característica que te gustaría implementar en un script de ejemplo, por favor discútelo en el [foro](https://discuss.huggingface.co/) o con un [issue](https://github.com/huggingface/transformers/issues) antes de enviar un Pull Request. Si bien agradecemos las correcciones de errores, es poco probable que fusionemos un Pull Request que agregue más funcionalidad a costa de la legibilidad. Esta guía te mostrará cómo ejecutar un ejemplo de un script de entrenamiento para resumir texto en [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) y [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization). Se espera que todos los ejemplos funcionen con ambos frameworks a menos que se especifique lo contrario. ## Configuración Para ejecutar con éxito la última versión de los scripts de ejemplo debes **instalar 🤗 Transformers desde su fuente** en un nuevo entorno virtual: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install . 
``` Para versiones anteriores de los scripts de ejemplo, haz clic en alguno de los siguientes links: <details> <summary>Ejemplos de versiones anteriores de 🤗 Transformers</summary> <ul> <li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li> </ul> </details> Luego cambia tu clon actual de 🤗 Transformers a una versión específica, por ejemplo v3.5.1: ```bash git checkout tags/v3.5.1 ``` Una vez que hayas configurado la versión correcta de la biblioteca, ve a la carpeta de ejemplo de tu elección e instala los requisitos específicos del ejemplo: ```bash pip install -r requirements.txt ``` ## Ejecutar un script <frameworkcontent> <pt> El script de ejemplo descarga y preprocesa un conjunto de datos de la biblioteca 🤗 [Datasets](https://huggingface.co/docs/datasets/). Luego, el script ajusta un conjunto de datos con [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) en una arquitectura que soporta la tarea de resumen. 
El siguiente ejemplo muestra cómo ajustar un [T5-small](https://huggingface.co/google-t5/t5-small) en el conjunto de datos [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). El modelo T5 requiere un argumento adicional `source_prefix` debido a cómo fue entrenado. Este aviso le permite a T5 saber que se trata de una tarea de resumir. ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` </pt> <tf> El script de ejemplo descarga y preprocesa un conjunto de datos de la biblioteca 🤗 [Datasets](https://huggingface.co/docs/datasets/). Luego, el script ajusta un conjunto de datos utilizando Keras en una arquitectura que soporta la tarea de resumir. El siguiente ejemplo muestra cómo ajustar un [T5-small](https://huggingface.co/google-t5/t5-small) en el conjunto de datos [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). El modelo T5 requiere un argumento adicional `source_prefix` debido a cómo fue entrenado. Este aviso le permite a T5 saber que se trata de una tarea de resumir. ```bash python examples/tensorflow/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 16 \ --num_train_epochs 3 \ --do_train \ --do_eval ``` </tf> </frameworkcontent> ## Entrenamiento distribuido y de precisión mixta [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) admite un entrenamiento distribuido y de precisión mixta, lo que significa que también puedes usarlo en un script. Para habilitar ambas características: - Agrega el argumento `fp16` para habilitar la precisión mixta. - Establece la cantidad de GPU que se usará con el argumento `nproc_per_node`. ```bash torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` Los scripts de TensorFlow utilizan [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) para el entrenamiento distribuido, y no es necesario agregar argumentos adicionales al script de entrenamiento. El script de TensorFlow utilizará múltiples GPUs de forma predeterminada si están disponibles. ## Ejecutar un script en una TPU <frameworkcontent> <pt> Las Unidades de Procesamiento de Tensor (TPUs) están diseñadas específicamente para acelerar el rendimiento. PyTorch admite TPU con el compilador de aprendizaje profundo [XLA](https://www.tensorflow.org/xla) (consulta [aquí](https://github.com/pytorch/xla/blob/master/README.md) para obtener más detalles). Para usar una TPU, inicia el script `xla_spawn.py` y usa el argumento `num_cores` para establecer la cantidad de núcleos de TPU que deseas usar. 
```bash python xla_spawn.py --num_cores 8 \ summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` </pt> <tf> Las Unidades de Procesamiento de Tensor (TPUs) están diseñadas específicamente para acelerar el rendimiento. TensorFlow utiliza [`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy) para entrenar en TPUs. Para usar una TPU, pasa el nombre del recurso de la TPU al argumento `tpu` ```bash python run_summarization.py \ --tpu name_of_tpu_resource \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 16 \ --num_train_epochs 3 \ --do_train \ --do_eval ``` </tf> </frameworkcontent> ## Ejecutar un script con 🤗 Accelerate 🤗 [Accelerate](https://huggingface.co/docs/accelerate) es una biblioteca exclusiva de PyTorch que ofrece un método unificado para entrenar un modelo en varios tipos de configuraciones (solo CPU, GPU múltiples, TPU) mientras mantiene una visibilidad completa en el ciclo de entrenamiento de PyTorch. Asegúrate de tener 🤗 Accelerate instalado si aún no lo tienes: > Nota: Como Accelerate se está desarrollando rápidamente, debes instalar la versión git de Accelerate para ejecutar los scripts ```bash pip install git+https://github.com/huggingface/accelerate ``` En lugar del script `run_summarization.py`, debes usar el script `run_summarization_no_trainer.py`. Los scripts compatibles con 🤗 Accelerate tendrán un archivo `task_no_trainer.py` en la carpeta. Comienza ejecutando el siguiente comando para crear y guardar un archivo de configuración: ```bash accelerate config ``` Prueba tu configuración para asegurarte que está configurada correctamente: ```bash accelerate test ``` Todo listo para iniciar el entrenamiento: ```bash accelerate launch run_summarization_no_trainer.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir ~/tmp/tst-summarization ``` ## Usar un conjunto de datos personalizado El script de la tarea resumir admite conjuntos de datos personalizados siempre que sean un archivo CSV o JSON Line. Cuando uses tu propio conjunto de datos, necesitas especificar varios argumentos adicionales: - `train_file` y `validation_file` especifican la ruta a tus archivos de entrenamiento y validación. - `text_column` es el texto de entrada para resumir. - `summary_column` es el texto de destino para la salida. 
Un script para resumir que utiliza un conjunto de datos personalizado se vera así: ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --train_file path_to_csv_or_jsonlines_file \ --validation_file path_to_csv_or_jsonlines_file \ --text_column text_column_name \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate ``` ## Prueba un script A veces, es una buena idea ejecutar tu secuencia de comandos en una cantidad menor de ejemplos para asegurarte de que todo funciona como se espera antes de comprometerte con un conjunto de datos completo, lo que puede demorar horas en completarse. Utiliza los siguientes argumentos para truncar el conjunto de datos a un número máximo de muestras: - `max_train_samples` - `max_eval_samples` - `max_predict_samples` ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --max_train_samples 50 \ --max_eval_samples 50 \ --max_predict_samples 50 \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` No todos los scripts de ejemplo admiten el argumento `max_predict_samples`. Puede que desconozcas si la secuencia de comandos admite este argumento, agrega `-h` para verificar: ```bash examples/pytorch/summarization/run_summarization.py -h ``` ## Reanudar el entrenamiento desde el punto de control Otra opción útil para habilitar es reanudar el entrenamiento desde un punto de control anterior. Esto asegurará que puedas continuar donde lo dejaste sin comenzar de nuevo si tu entrenamiento se interrumpe. Hay dos métodos para reanudar el entrenamiento desde un punto de control. El primer método utiliza el argumento `output_dir previous_output_dir` para reanudar el entrenamiento desde el último punto de control almacenado en `output_dir`. En este caso, debes eliminar `overwrite_output_dir`: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --output_dir previous_output_dir \ --predict_with_generate ``` El segundo método utiliza el argumento `resume_from_checkpoint path_to_specific_checkpoint` para reanudar el entrenamiento desde una carpeta de punto de control específica. ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` ## Comparte tu modelo Todos los scripts pueden cargar tu modelo final en el [Model Hub](https://huggingface.co/models). Asegúrate de haber iniciado sesión en Hugging Face antes de comenzar: ```bash huggingface-cli login ``` Luego agrega el argumento `push_to_hub` al script. 
Este argumento creará un repositorio con tu nombre de usuario Hugging Face y el nombre de la carpeta especificado en `output_dir`. Para darle a tu repositorio un nombre específico, usa el argumento `push_to_hub_model_id` para añadirlo. El repositorio se incluirá automáticamente en tu namespace. El siguiente ejemplo muestra cómo cargar un modelo con un nombre de repositorio específico: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ```
transformers/docs/source/es/run_scripts.md/0
{ "file_path": "transformers/docs/source/es/run_scripts.md", "repo_id": "transformers", "token_count": 7017 }
254
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Visite rapide - local: installation title: Installation title: Démarrer - sections: - local: in_translation title: Pipelines pour l'inférence - local: autoclass_tutorial title: Chargement d'instances pré-entraînées avec une AutoClass - local: in_translation title: Préparation des données - local: in_translation title: Fine-tune un modèle pré-entraîné - local: in_translation title: Entraînement avec un script - local: in_translation title: Entraînement distribué avec 🤗 Accelerate - local: in_translation title: Chargement et entraînement des adaptateurs avec 🤗 PEFT - local: in_translation title: Partager un modèle - local: in_translation title: Agents - local: in_translation title: Génération avec LLMs title: Tutoriels
transformers/docs/source/fr/_toctree.yml/0
{ "file_path": "transformers/docs/source/fr/_toctree.yml", "repo_id": "transformers", "token_count": 376 }
255
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Convertire checkpoint di Tensorflow È disponibile un'interfaccia a linea di comando per convertire gli originali checkpoint di Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM in modelli che possono essere caricati utilizzando i metodi `from_pretrained` della libreria. <Tip> A partire dalla versione 2.3.0 lo script di conversione è parte di transformers CLI (**transformers-cli**), disponibile in ogni installazione di transformers >=2.3.0. La seguente documentazione riflette il formato dei comandi di **transformers-cli convert**. </Tip> ## BERT Puoi convertire qualunque checkpoint Tensorflow di BERT (in particolare [i modeli pre-allenati rilasciati da Google](https://github.com/google-research/bert#pre-trained-models)) in un file di salvataggio Pytorch utilizzando lo script [convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py). Questo CLI prende come input un checkpoint di Tensorflow (tre files che iniziano con `bert_model.ckpt`) ed il relativo file di configurazione (`bert_config.json`), crea un modello Pytorch per questa configurazione, carica i pesi dal checkpoint di Tensorflow nel modello di Pytorch e salva il modello che ne risulta in un file di salvataggio standard di Pytorch che può essere importato utilizzando `from_pretrained()` (vedi l'esempio nel [quicktour](quicktour) , [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py) ). Devi soltanto lanciare questo script di conversione **una volta** per ottenere un modello Pytorch. Dopodichè, potrai tralasciare il checkpoint di Tensorflow (i tre files che iniziano con `bert_model.ckpt`), ma assicurati di tenere il file di configurazione (`bert_config.json`) ed il file di vocabolario (`vocab.txt`) in quanto queste componenti sono necessarie anche per il modello di Pytorch. Per lanciare questo specifico script di conversione avrai bisogno di un'installazione di Tensorflow e di Pytorch (`pip install tensorflow`). Il resto della repository richiede soltanto Pytorch. Questo è un esempio del processo di conversione per un modello `BERT-Base Uncased` pre-allenato: ```bash export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12 transformers-cli convert --model_type bert \ --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \ --config $BERT_BASE_DIR/bert_config.json \ --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin ``` Puoi scaricare i modelli pre-allenati di Google per la conversione [qua](https://github.com/google-research/bert#pre-trained-models). 
## ALBERT Per il modello ALBERT, converti checkpoint di Tensoflow in Pytorch utilizzando lo script [convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py). Il CLI prende come input un checkpoint di Tensorflow (tre files che iniziano con `model.ckpt-best`) e i relativi file di configurazione (`albert_config.json`), dopodichè crea e salva un modello Pytorch. Per lanciare questa conversione avrai bisogno di un'installazione di Tensorflow e di Pytorch. Ecco un esempio del procedimento di conversione di un modello `ALBERT Base` pre-allenato: ```bash export ALBERT_BASE_DIR=/path/to/albert/albert_base transformers-cli convert --model_type albert \ --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \ --config $ALBERT_BASE_DIR/albert_config.json \ --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin ``` Puoi scaricare i modelli pre-allenati di Google per la conversione [qui](https://github.com/google-research/albert#pre-trained-models). ## OpenAI GPT Ecco un esempio del processo di conversione di un modello OpenAI GPT pre-allenato, assumendo che il tuo checkpoint di NumPy sia salvato nello stesso formato dei modelli pre-allenati OpenAI (vedi [qui](https://github.com/openai/finetune-transformer-lm)): ```bash export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights transformers-cli convert --model_type gpt \ --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ [--config OPENAI_GPT_CONFIG] \ [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \ ``` ## OpenAI GPT-2 Ecco un esempio del processo di conversione di un modello OpenAI GPT-2 pre-allenato (vedi [qui](https://github.com/openai/gpt-2)): ```bash export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/openai-community/gpt2/pretrained/weights transformers-cli convert --model_type openai-community/gpt2 \ --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ [--config OPENAI_GPT2_CONFIG] \ [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] ``` ## XLNet Ecco un esempio del processo di conversione di un modello XLNet pre-allenato: ```bash export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config transformers-cli convert --model_type xlnet \ --tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \ --config $TRANSFO_XL_CONFIG_PATH \ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ [--finetuning_task_name XLNET_FINETUNED_TASK] \ ``` ## XLM Ecco un esempio del processo di conversione di un modello XLM pre-allenato: ```bash export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint transformers-cli convert --model_type xlm \ --tf_checkpoint $XLM_CHECKPOINT_PATH \ --pytorch_dump_output $PYTORCH_DUMP_OUTPUT [--config XML_CONFIG] \ [--finetuning_task_name XML_FINETUNED_TASK] ``` ## T5 Ecco un esempio del processo di conversione di un modello T5 pre-allenato: ```bash export T5=/path/to/t5/uncased_L-12_H-768_A-12 transformers-cli convert --model_type t5 \ --tf_checkpoint $T5/t5_model.ckpt \ --config $T5/t5_config.json \ --pytorch_dump_output $T5/pytorch_model.bin ```
transformers/docs/source/it/converting_tensorflow_models.md/0
{ "file_path": "transformers/docs/source/it/converting_tensorflow_models.md", "repo_id": "transformers", "token_count": 2422 }
256
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Models ベースクラスである [`PreTrainedModel`]、[`TFPreTrainedModel`]、[`FlaxPreTrainedModel`] は、モデルの読み込みと保存に関する共通のメソッドを実装しており、これはローカルのファイルやディレクトリから、またはライブラリが提供する事前学習モデル構成(HuggingFaceのAWS S3リポジトリからダウンロード)からモデルを読み込むために使用できます。 [`PreTrainedModel`] と [`TFPreTrainedModel`] は、次の共通のメソッドも実装しています: - 語彙に新しいトークンが追加された場合に、入力トークン埋め込みのリサイズを行う - モデルのアテンションヘッドを刈り込む 各モデルに共通するその他のメソッドは、[`~modeling_utils.ModuleUtilsMixin`](PyTorchモデル用)および[`~modeling_tf_utils.TFModuleUtilsMixin`](TensorFlowモデル用)で定義されており、テキスト生成の場合、[`~generation.GenerationMixin`](PyTorchモデル用)、[`~generation.TFGenerationMixin`](TensorFlowモデル用)、および[`~generation.FlaxGenerationMixin`](Flax/JAXモデル用)もあります。 ## PreTrainedModel [[autodoc]] PreTrainedModel - push_to_hub - all <a id='from_pretrained-torch-dtype'></a> ### 大規模モデルの読み込み Transformers 4.20.0では、[`~PreTrainedModel.from_pretrained`] メソッドが再設計され、[Accelerate](https://huggingface.co/docs/accelerate/big_modeling) を使用して大規模モデルを扱うことが可能になりました。これには Accelerate >= 0.9.0 と PyTorch >= 1.9.0 が必要です。以前の方法でフルモデルを作成し、その後事前学習の重みを読み込む代わりに(これにはメモリ内のモデルサイズが2倍必要で、ランダムに初期化されたモデル用と重み用の2つが必要でした)、モデルを空の外殻として作成し、事前学習の重みが読み込まれるときにパラメーターを実体化するオプションが追加されました。 このオプションは `low_cpu_mem_usage=True` で有効にできます。モデルはまず空の重みを持つメタデバイス上に作成され、その後状態辞書が内部に読み込まれます(シャードされたチェックポイントの場合、シャードごとに読み込まれます)。この方法で使用される最大RAMは、モデルの完全なサイズだけです。 ```py from transformers import AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", low_cpu_mem_usage=True) ``` さらに、モデルが完全にRAMに収まらない場合(現時点では推論のみ有効)、異なるデバイスにモデルを直接配置できます。`device_map="auto"` を使用すると、Accelerateは各レイヤーをどのデバイスに配置するかを決定し、最速のデバイス(GPU)を最大限に活用し、残りの部分をCPU、あるいはGPU RAMが不足している場合はハードドライブにオフロードします。モデルが複数のデバイスに分割されていても、通常どおり実行されます。 `device_map` を渡す際、`low_cpu_mem_usage` は自動的に `True` に設定されるため、それを指定する必要はありません。 ```py from transformers import AutoModelForSeq2SeqLM t0pp = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0pp", device_map="auto") ``` モデルがデバイス間でどのように分割されたかは、その `hf_device_map` 属性を見ることで確認できます: ```py t0pp.hf_device_map ``` ```python out {'shared': 0, 'decoder.embed_tokens': 0, 'encoder': 0, 'decoder.block.0': 0, 'decoder.block.1': 1, 'decoder.block.2': 1, 'decoder.block.3': 1, 'decoder.block.4': 1, 'decoder.block.5': 1, 'decoder.block.6': 1, 'decoder.block.7': 1, 'decoder.block.8': 1, 'decoder.block.9': 1, 'decoder.block.10': 1, 'decoder.block.11': 1, 'decoder.block.12': 1, 'decoder.block.13': 1, 'decoder.block.14': 1, 'decoder.block.15': 1, 'decoder.block.16': 1, 'decoder.block.17': 1, 'decoder.block.18': 1, 'decoder.block.19': 1, 'decoder.block.20': 1, 'decoder.block.21': 1, 'decoder.block.22': 'cpu', 'decoder.block.23': 'cpu', 'decoder.final_layer_norm': 'cpu', 'decoder.dropout': 'cpu', 'lm_head': 'cpu'} ``` 
同じフォーマットに従って、独自のデバイスマップを作成することもできます(レイヤー名からデバイスへの辞書です)。モデルのすべてのパラメータを指定されたデバイスにマップする必要がありますが、1つのレイヤーが完全に同じデバイスにある場合、そのレイヤーのサブモジュールのすべてがどこに行くかの詳細を示す必要はありません。例えば、次のデバイスマップはT0ppに適しています(GPUメモリがある場合): ```python device_map = {"shared": 0, "encoder": 0, "decoder": 1, "lm_head": 1} ``` モデルのメモリへの影響を最小限に抑えるもう 1 つの方法は、低精度の dtype (`torch.float16` など) でモデルをインスタンス化するか、以下で説明する直接量子化手法を使用することです。 ### Model Instantiation dtype Pytorch では、モデルは通常 `torch.float32` 形式でインスタンス化されます。これは、しようとすると問題になる可能性があります 重みが fp16 にあるモデルをロードすると、2 倍のメモリが必要になるためです。この制限を克服するには、次のことができます。 `torch_dtype` 引数を使用して、目的の `dtype` を明示的に渡します。 ```python model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype=torch.float16) ``` または、モデルを常に最適なメモリ パターンでロードしたい場合は、特別な値 `"auto"` を使用できます。 そして、`dtype` はモデルの重みから自動的に導出されます。 ```python model = T5ForConditionalGeneration.from_pretrained("t5", torch_dtype="auto") ``` スクラッチからインスタンス化されたモデルには、どの `dtype` を使用するかを指示することもできます。 ```python config = T5Config.from_pretrained("t5") model = AutoModel.from_config(config) ``` Pytorch の設計により、この機能は浮動小数点 dtype でのみ使用できます。 ## ModuleUtilsMixin [[autodoc]] modeling_utils.ModuleUtilsMixin ## TFPreTrainedModel [[autodoc]] TFPreTrainedModel - push_to_hub - all ## TFModelUtilsMixin [[autodoc]] modeling_tf_utils.TFModelUtilsMixin ## FlaxPreTrainedModel [[autodoc]] FlaxPreTrainedModel - push_to_hub - all ## Pushing to the Hub [[autodoc]] utils.PushToHubMixin ## Sharded checkpoints [[autodoc]] modeling_utils.load_sharded_checkpoint
transformers/docs/source/ja/main_classes/model.md/0
{ "file_path": "transformers/docs/source/ja/main_classes/model.md", "repo_id": "transformers", "token_count": 3297 }
257
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Bark ## Overview Bark は、[suno-ai/bark](https://github.com/suno-ai/bark) で Suno AI によって提案されたトランスフォーマーベースのテキスト読み上げモデルです。 Bark は 4 つの主要なモデルで構成されています。 - [`BarkSemanticModel`] ('テキスト'モデルとも呼ばれる): トークン化されたテキストを入力として受け取り、テキストの意味を捉えるセマンティック テキスト トークンを予測する因果的自己回帰変換モデル。 - [`BarkCoarseModel`] ('粗い音響' モデルとも呼ばれる): [`BarkSemanticModel`] モデルの結果を入力として受け取る因果的自己回帰変換器。 EnCodec に必要な最初の 2 つのオーディオ コードブックを予測することを目的としています。 - [`BarkFineModel`] ('微細音響' モデル)、今回は非因果的オートエンコーダー トランスフォーマーで、以前のコードブック埋め込みの合計に基づいて最後のコードブックを繰り返し予測します。 - [`EncodecModel`] からすべてのコードブック チャネルを予測したので、Bark はそれを使用して出力オーディオ配列をデコードします。 最初の 3 つのモジュールはそれぞれ、特定の事前定義された音声に従って出力サウンドを調整するための条件付きスピーカー埋め込みをサポートできることに注意してください。 ### Optimizing Bark Bark は、コードを数行追加するだけで最適化でき、**メモリ フットプリントが大幅に削減**され、**推論が高速化**されます。 #### Using half-precision モデルを半精度でロードするだけで、推論を高速化し、メモリ使用量を 50% 削減できます。 ```python from transformers import BarkModel import torch device = "cuda" if torch.cuda.is_available() else "cpu" model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16).to(device) ``` #### Using 🤗 Better Transformer Better Transformer は、内部でカーネル融合を実行する 🤗 Optimum の機能です。パフォーマンスを低下させることなく、速度を 20% ~ 30% 向上させることができます。モデルを 🤗 Better Transformer にエクスポートするのに必要なコードは 1 行だけです。 ```python model = model.to_bettertransformer() ``` この機能を使用する前に 🤗 Optimum をインストールする必要があることに注意してください。 [インストール方法はこちら](https://huggingface.co/docs/optimum/installation) #### Using CPU offload 前述したように、Bark は 4 つのサブモデルで構成されており、オーディオ生成中に順番に呼び出されます。言い換えれば、1 つのサブモデルが使用されている間、他のサブモデルはアイドル状態になります。 CUDA デバイスを使用している場合、メモリ フットプリントの 80% 削減による恩恵を受ける簡単な解決策は、アイドル状態の GPU のサブモデルをオフロードすることです。この操作は CPU オフロードと呼ばれます。 1行のコードで使用できます。 ```python model.enable_cpu_offload() ``` この機能を使用する前に、🤗 Accelerate をインストールする必要があることに注意してください。 [インストール方法はこちら](https://huggingface.co/docs/accelerate/basic_tutorials/install) #### Combining optimization techniques 最適化手法を組み合わせて、CPU オフロード、半精度、🤗 Better Transformer をすべて一度に使用できます。 ```python from transformers import BarkModel import torch device = "cuda" if torch.cuda.is_available() else "cpu" # load in fp16 model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16).to(device) # convert to bettertransformer model = model.to_bettertransformer() # enable CPU offload model.enable_cpu_offload() ``` 推論最適化手法の詳細については、[こちら](https://huggingface.co/docs/transformers/perf_infer_gpu_one) をご覧ください。 ### Tips Suno は、多くの言語で音声プリセットのライブラリを提供しています [こちら](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c)。 これらのプリセットは、ハブ [こちら](https://huggingface.co/suno/bark-small/tree/main/speaker_embeddings) または [こちら](https://huggingface.co/suno/bark/tree/main/speaker_embeddings) でも公開されています。 ```python >>> from transformers import AutoProcessor, BarkModel >>> processor = AutoProcessor.from_pretrained("suno/bark") >>> model = BarkModel.from_pretrained("suno/bark") >>> voice_preset = "v2/en_speaker_6" >>> inputs = processor("Hello, my dog is cute", voice_preset=voice_preset) >>>
audio_array = model.generate(**inputs) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` Bark は、非常にリアルな **多言語** 音声だけでなく、音楽、背景ノイズ、単純な効果音などの他の音声も生成できます。 ```python >>> # Multilingual speech - simplified Chinese >>> inputs = processor("惊人的!我会说中文") >>> # Multilingual speech - French - let's use a voice_preset as well >>> inputs = processor("Incroyable! Je peux générer du son.", voice_preset="fr_speaker_5") >>> # Bark can also generate music. You can help it out by adding music notes around your lyrics. >>> inputs = processor("♪ Hello, my dog is cute ♪") >>> audio_array = model.generate(**inputs) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` このモデルは、笑う、ため息、泣くなどの**非言語コミュニケーション**を生成することもできます。 ```python >>> # Adding non-speech cues to the input text >>> inputs = processor("Hello uh ... [clears throat], my dog is cute [laughter]") >>> audio_array = model.generate(**inputs) >>> audio_array = audio_array.cpu().numpy().squeeze() ``` オーディオを保存するには、モデル設定と scipy ユーティリティからサンプル レートを取得するだけです。 ```python >>> from scipy.io.wavfile import write as write_wav >>> # save audio to disk, but first take the sample rate from the model config >>> sample_rate = model.generation_config.sample_rate >>> write_wav("bark_generation.wav", sample_rate, audio_array) ``` このモデルは、[Yoach Lacombe (ylacombe)](https://huggingface.co/ylacombe) および [Sanchit Gandhi (sanchit-gandhi)](https://github.com/sanchit-gandhi) によって提供されました。 元のコードは [ここ](https://github.com/suno-ai/bark) にあります。 ## BarkConfig [[autodoc]] BarkConfig - all ## BarkProcessor [[autodoc]] BarkProcessor - all - __call__ ## BarkModel [[autodoc]] BarkModel - generate - enable_cpu_offload ## BarkSemanticModel [[autodoc]] BarkSemanticModel - forward ## BarkCoarseModel [[autodoc]] BarkCoarseModel - forward ## BarkFineModel [[autodoc]] BarkFineModel - forward ## BarkCausalModel [[autodoc]] BarkCausalModel - forward ## BarkCoarseConfig [[autodoc]] BarkCoarseConfig - all ## BarkFineConfig [[autodoc]] BarkFineConfig - all ## BarkSemanticConfig [[autodoc]] BarkSemanticConfig - all
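なお、上記の「Combining optimization techniques」の例では `BetterTransformer` がインポートされないまま使われています。🤗 Optimum から `BetterTransformer` をインポートするか、本文前半と同様に `model.to_bettertransformer()` を使う形に揃えるのが安全です。以下は、インポートの問題を避けた組み合わせのスケッチです(🤗 Optimum と 🤗 Accelerate がインストール済みであることを仮定した説明用の例です)。

```python
from transformers import BarkModel
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

# load in fp16
model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16).to(device)

# convert to bettertransformer (requires 🤗 Optimum)
model = model.to_bettertransformer()

# enable CPU offload (requires 🤗 Accelerate)
model.enable_cpu_offload()
```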
transformers/docs/source/ja/model_doc/bark.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bark.md", "repo_id": "transformers", "token_count": 3181 }
258
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BLIP ## Overview BLIP モデルは、[BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) で Junnan Li、Dongxu Li、Caiming Xiong、Steven Hoi によって提案されました。 。 BLIP は、次のようなさまざまなマルチモーダル タスクを実行できるモデルです。 - 視覚的な質問応答 - 画像とテキストの検索(画像とテキストのマッチング) - 画像キャプション 論文の要約は次のとおりです。 *視覚言語事前トレーニング (VLP) により、多くの視覚言語タスクのパフォーマンスが向上しました。 ただし、既存の事前トレーニング済みモデルのほとんどは、理解ベースのタスクまたは世代ベースのタスクのいずれかでのみ優れています。さらに、最適ではない監視ソースである Web から収集されたノイズの多い画像とテキストのペアを使用してデータセットをスケールアップすることで、パフォーマンスの向上が大幅に達成されました。この論文では、視覚言語の理解と生成タスクの両方に柔軟に移行する新しい VLP フレームワークである BLIP を提案します。 BLIP は、キャプションをブートストラップすることでノイズの多い Web データを効果的に利用します。キャプショナーが合成キャプションを生成し、フィルターがノイズの多いキャプションを除去します。画像テキスト検索 (平均再現率 +2.7%@1)、画像キャプション作成 (CIDEr で +2.8%)、VQA ( VQA スコアは +1.6%)。 BLIP は、ゼロショット方式でビデオ言語タスクに直接転送した場合にも、強力な一般化能力を発揮します。コード、モデル、データセットがリリースされています。* ![BLIP.gif](https://cdn-uploads.huggingface.co/production/uploads/1670928184033-62441d1d9fdefb55a0b7d12c.gif) このモデルは [ybelkada](https://huggingface.co/ybelkada) によって提供されました。 元のコードは [ここ](https://github.com/salesforce/BLIP) にあります。 ## Resources - [Jupyter ノートブック](https://github.com/huggingface/notebooks/blob/main/examples/image_captioning_blip.ipynb) カスタム データセットの画像キャプション用に BLIP を微調整する方法 ## BlipConfig [[autodoc]] BlipConfig - from_text_vision_configs ## BlipTextConfig [[autodoc]] BlipTextConfig ## BlipVisionConfig [[autodoc]] BlipVisionConfig ## BlipProcessor [[autodoc]] BlipProcessor ## BlipImageProcessor [[autodoc]] BlipImageProcessor - preprocess <frameworkcontent> <pt> ## BlipModel [[autodoc]] BlipModel - forward - get_text_features - get_image_features ## BlipTextModel [[autodoc]] BlipTextModel - forward ## BlipVisionModel [[autodoc]] BlipVisionModel - forward ## BlipForConditionalGeneration [[autodoc]] BlipForConditionalGeneration - forward ## BlipForImageTextRetrieval [[autodoc]] BlipForImageTextRetrieval - forward ## BlipForQuestionAnswering [[autodoc]] BlipForQuestionAnswering - forward </pt> <tf> ## TFBlipModel [[autodoc]] TFBlipModel - call - get_text_features - get_image_features ## TFBlipTextModel [[autodoc]] TFBlipTextModel - call ## TFBlipVisionModel [[autodoc]] TFBlipVisionModel - call ## TFBlipForConditionalGeneration [[autodoc]] TFBlipForConditionalGeneration - call ## TFBlipForImageTextRetrieval [[autodoc]] TFBlipForImageTextRetrieval - call ## TFBlipForQuestionAnswering [[autodoc]] TFBlipForQuestionAnswering - call </tf> </frameworkcontent>
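参考までに、画像キャプション生成の最小限の推論スケッチを示します(チェックポイント名 `Salesforce/blip-image-captioning-base` と画像 URL は説明用の想定であり、環境に合わせて置き換えてください)。

```python
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")

# load an example image (any RGB image works)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# generate a caption
inputs = processor(images=image, return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```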
transformers/docs/source/ja/model_doc/blip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/blip.md", "repo_id": "transformers", "token_count": 1785 }
259
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ConvBERT <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=convbert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-convbert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/conv-bert-base"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview ConvBERT モデルは、[ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) で Zihang Jiang、Weihao Yu、Daquan Zhou、Yunpeng Chen、Jiashi Feng、Shuicheng Yan によって提案されました。 やん。 論文の要約は次のとおりです。 *BERT やそのバリアントなどの事前トレーニング済み言語モデルは、最近、さまざまな環境で目覚ましいパフォーマンスを達成しています。 自然言語理解タスク。ただし、BERT はグローバルな自己注意ブロックに大きく依存しているため、問題が発生します。 メモリ使用量と計算コストが大きくなります。すべての注意が入力シーケンス全体に対してクエリを実行しますが、 グローバルな観点からアテンション マップを生成すると、一部のヘッドはローカルな依存関係のみを学習する必要があることがわかります。 これは、計算の冗長性が存在することを意味します。したがって、我々は、新しいスパンベースの動的畳み込みを提案します。 これらのセルフアテンション ヘッドを置き換えて、ローカルの依存関係を直接モデル化します。新しいコンボリューションヘッドと、 自己注意の頭を休め、グローバルとローカルの両方の状況でより効率的な新しい混合注意ブロックを形成します 学ぶ。この混合注意設計を BERT に装備し、ConvBERT モデルを構築します。実験でわかったことは、 ConvBERT は、トレーニング コストが低く、さまざまな下流タスクにおいて BERT およびその亜種よりも大幅に優れたパフォーマンスを発揮します。 モデルパラメータが少なくなります。注目すべきことに、ConvBERTbase モデルは 86.4 GLUE スコアを達成し、ELECTRAbase よりも 0.7 高いのに対し、 トレーニングコストは 1/4 未満です。コードと事前トレーニングされたモデルがリリースされます。* このモデルは、[abhishek](https://huggingface.co/abhishek) によって提供されました。オリジナルの実装が見つかります ここ: https://github.com/yitu-opensource/ConvBert ## Usage tips ConvBERT トレーニングのヒントは BERT のヒントと似ています。使用上のヒントについては、[BERT ドキュメント](bert) を参照してください。 ## Resources - [テキスト分類タスクガイド](../tasks/sequence_classification) - [トークン分類タスクガイド](../tasks/token_classification) - [質問回答タスク ガイド](../tasks/question_answering) - [マスクされた言語モデリング タスク ガイド](../tasks/masked_lang_modeling) - [多肢選択タスク ガイド](../tasks/multiple_choice) ## ConvBertConfig [[autodoc]] ConvBertConfig ## ConvBertTokenizer [[autodoc]] ConvBertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## ConvBertTokenizerFast [[autodoc]] ConvBertTokenizerFast <frameworkcontent> <pt> ## ConvBertModel [[autodoc]] ConvBertModel - forward ## ConvBertForMaskedLM [[autodoc]] ConvBertForMaskedLM - forward ## ConvBertForSequenceClassification [[autodoc]] ConvBertForSequenceClassification - forward ## ConvBertForMultipleChoice [[autodoc]] ConvBertForMultipleChoice - forward ## ConvBertForTokenClassification [[autodoc]] ConvBertForTokenClassification - forward ## ConvBertForQuestionAnswering [[autodoc]] ConvBertForQuestionAnswering - forward </pt> <tf> ## TFConvBertModel [[autodoc]] TFConvBertModel - call ## TFConvBertForMaskedLM [[autodoc]] TFConvBertForMaskedLM - call ## TFConvBertForSequenceClassification [[autodoc]] TFConvBertForSequenceClassification - call ## TFConvBertForMultipleChoice [[autodoc]] 
TFConvBertForMultipleChoice - call ## TFConvBertForTokenClassification [[autodoc]] TFConvBertForTokenClassification - call ## TFConvBertForQuestionAnswering [[autodoc]] TFConvBertForQuestionAnswering - call </tf> </frameworkcontent>
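参考までに、事前学習済みチェックポイントから最終層の隠れ状態を取得する最小限のスケッチを示します(チェックポイント名 `YituTech/conv-bert-base` は説明用の想定であり、実際に使用するモデルに置き換えてください)。

```python
import torch
from transformers import AutoTokenizer, ConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# (batch_size, sequence_length, hidden_size) の最終層の隠れ状態
print(outputs.last_hidden_state.shape)
```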
transformers/docs/source/ja/model_doc/convbert.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/convbert.md", "repo_id": "transformers", "token_count": 2155 }
260
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DialoGPT ## Overview DialoGPT は、[DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) で Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan によって提案されました。これは、Reddit から抽出された 1 億 4,700 万件の会話のようなやりとりでトレーニングされた GPT2 モデルです。 論文の要約は次のとおりです。 *私たちは、大規模で調整可能なニューラル会話応答生成モデル DialoGPT (対話生成事前トレーニング済みトランスフォーマー) を紹介します。2005 年から 2017 年にかけて Reddit のコメント チェーンから抽出された 1 億 4,700 万件の会話のようなやり取りでトレーニングされた DialoGPT は、Hugging Face の PyTorch トランスフォーマーを拡張し、シングルターン対話設定における自動評価と人間による評価の両方で人間に近いパフォーマンスを達成しました。DialoGPT を活用した会話システムは、強力なベースライン システムよりも関連性が高く、内容が充実し、コンテキストに一貫性のある応答を生成します。よりインテリジェントなオープンドメイン対話システムの研究開発を促進するために、事前トレーニングされたモデルとトレーニング パイプラインが公開されています。* 元のコードは [ここ](https://github.com/microsoft/DialoGPT) にあります。 ## Usage tips - DialoGPT は絶対位置埋め込みを備えたモデルであるため、通常は入力を左側ではなく右側にパディングすることをお勧めします。 - DialoGPT は、会話データの因果言語モデリング (CLM) 目標に基づいてトレーニングされているため、オープンドメイン対話システムにおける応答生成に強力です。 - [DialoGPT's model card](https://huggingface.co/microsoft/DialoGPT-medium) に示されているように、ユーザーはわずか 10 行のコードでチャット ボットを作成できます(本ページ末尾にスケッチを示します)。 トレーニング: DialoGPT をトレーニングまたは微調整するには、因果言語モデリング トレーニングを使用できます。公式論文を引用すると: *私たちは OpenAI GPT-2 に従って、マルチターン対話セッションを長いテキストとしてモデル化し、生成タスクを言語モデリングとしてフレーム化します。まず、ダイアログ セッション内のすべてのダイアログ ターンを長いテキスト x_1,..., x_N (N はシーケンス長) に連結します。* 詳細については、元の論文を参照してください。 <Tip> DialoGPT のアーキテクチャは GPT2 モデルに基づいています。API リファレンスと例については、[GPT2 のドキュメント ページ](openai-community/gpt2) を参照してください。 </Tip>
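以下は、上記のモデル カードで紹介されている方法に沿って数ターンの対話を行うための簡単なスケッチです(チェックポイントは `microsoft/DialoGPT-medium` を想定しています)。

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

chat_history_ids = None
for step in range(5):
    # encode the new user input and append the EOS token
    new_user_input_ids = tokenizer.encode(input(">> User: ") + tokenizer.eos_token, return_tensors="pt")

    # append the new input to the chat history (if any)
    bot_input_ids = (
        torch.cat([chat_history_ids, new_user_input_ids], dim=-1)
        if chat_history_ids is not None
        else new_user_input_ids
    )

    # generate a response while limiting the total length to 1000 tokens
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)

    # print only the newly generated tokens
    print("DialoGPT:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```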
transformers/docs/source/ja/model_doc/dialogpt.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/dialogpt.md", "repo_id": "transformers", "token_count": 1576 }
261
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Efficient Training on Multiple GPUs 単一のGPUでのトレーニングが遅すぎる場合や、モデルの重みが単一のGPUのメモリに収まらない場合、複数のGPUを使用したセットアップが必要となります。単一のGPUから複数のGPUへの切り替えには、ワークロードを分散するためのある種の並列処理が必要です。データ、テンソル、またはパイプラインの並列処理など、さまざまな並列処理技術があります。ただし、すべてに適した一つの解決策は存在せず、最適な設定は使用するハードウェアに依存します。この記事は、おそらく他のフレームワークにも適用される主要な概念に焦点を当てつつ、PyTorchベースの実装に焦点を当てています。 <Tip> **注意**: [単一GPUセクション](perf_train_gpu_one) で紹介された多くの戦略(混合精度トレーニングや勾配蓄積など)は一般的であり、モデルのトレーニングに一般的に適用されます。したがって、マルチGPUやCPUトレーニングなどの次のセクションに入る前に、それを確認してください。 </Tip> まず、さまざまな1D並列処理技術とその利点および欠点について詳しく説明し、それらを2Dおよび3D並列処理に組み合わせてさらに高速なトレーニングを実現し、より大きなモデルをサポートする方法を検討します。さまざまな他の強力な代替手法も紹介されます。 ## Concepts 以下は、この文書で後で詳しく説明される主要な概念の簡単な説明です。 1. **DataParallel (DP)** - 同じセットアップが複数回複製され、各セットアップにデータのスライスが供給されます。処理は並行して行われ、各セットアップはトレーニングステップの最後に同期されます。 2. **TensorParallel (TP)** - 各テンソルは複数のチャンクに分割され、単一のGPUにテンソル全体が存在するのではなく、テンソルの各シャードが指定されたGPUに存在します。処理中に、各シャードは別々に並行して処理され、異なるGPUで同期され、ステップの最後に結果が同期されます。これは水平並列処理と呼ばれるもので、分割は水平レベルで行われます。 3. **PipelineParallel (PP)** - モデルは垂直(レイヤーレベル)に複数のGPUに分割され、モデルの単一または複数のレイヤーが単一のGPUに配置されます。各GPUはパイプラインの異なるステージを並行して処理し、バッチの小さなチャンクで作業します。 4. **Zero Redundancy Optimizer (ZeRO)** - TPといくらか似たようなテンソルのシャーディングを実行しますが、前向きまたは後向きの計算のためにテンソル全体が再構築されるため、モデルを変更する必要はありません。また、GPUメモリが制限されている場合に補償するためのさまざまなオフロード技術をサポートします。 5. **Sharded DDP** - Sharded DDPは、さまざまなZeRO実装で使用される基本的なZeROコンセプトの別名です。 各コンセプトの詳細に深入りする前に、大規模なインフラストラクチャで大規模なモデルをトレーニングする際の大まかな決定プロセスを見てみましょう。 ## Scalability Strategy **⇨ シングルノード / マルチGPU** * モデルが単一のGPUに収まる場合: 1. DDP - 分散データ並列 2. ZeRO - 状況と使用される構成に応じて速いかどうかが異なります * モデルが単一のGPUに収まらない場合: 1. PP 2. ZeRO 3. TP 非常に高速なノード内接続(NVLINKまたはNVSwitchなど)があれば、これらの3つはほぼ同じ速度になるはずで、これらがない場合、PPはTPまたはZeROよりも速くなります。TPの程度も差を生じるかもしれません。特定のセットアップでの勝者を見つけるために実験することが最善です。 TPはほとんどの場合、単一ノード内で使用されます。つまり、TPサイズ <= ノードごとのGPU数です。 * 最大のレイヤーが単一のGPUに収まらない場合: 1. ZeROを使用しない場合 - TPを使用する必要があります。PP単独では収まらないでしょう。 2. ZeROを使用する場合 - "シングルGPU"のエントリと同じものを参照してください **⇨ マルチノード / マルチGPU** * ノード間の高速接続がある場合: 1. ZeRO - モデルへのほとんどの変更が不要です 2. PP+TP+DP - 通信が少なく、モデルへの大規模な変更が必要です * ノード間の接続が遅く、GPUメモリがまだ不足している場合: 1. DP+PP+TP+ZeRO-1 ## Data Parallelism 2つのGPUを持つほとんどのユーザーは、`DataParallel`(DP)と`DistributedDataParallel`(DDP)によって提供されるトレーニング速度の向上をすでに享受しています。これらはほぼ自明に使用できるPyTorchの組み込み機能です。一般的に、すべてのモデルで動作するDDPを使用することをお勧めします。DPは一部のモデルで失敗する可能性があるためです。[PyTorchのドキュメンテーション](https://pytorch.org/docs/master/generated/torch.nn.DataParallel.html)自体もDDPの使用を推奨しています。 ### DP vs DDP `DistributedDataParallel`(DDP)は通常、`DataParallel`(DP)よりも高速ですが、常にそうとは限りません: * DPはPythonスレッドベースですが、DDPはマルチプロセスベースです。そのため、GIL(Global Interpreter Lock)などのPythonスレッドの制約がないためです。 * 一方、GPUカード間の遅い相互接続性は、DDPの場合に実際には遅い結果をもたらす可能性があります。 以下は、2つのモード間のGPU間通信の主な違いです: [DDP](https://pytorch.org/docs/master/notes/ddp.html): - 開始時、メインプロセスはモデルをGPU 0から他のGPUに複製します。 - それから各バッチごとに: 1. 各GPUは各自のミニバッチのデータを直接消費します。 2. 
`backward`中、ローカル勾配が準備できると、それらはすべてのプロセスで平均化されます。 [DP](https://pytorch.org/docs/master/generated/torch.nn.DataParallel.html): 各バッチごとに: 1. GPU 0はデータバッチを読み取り、それから各GPUにミニバッチを送信します。 2. GPU 0から各GPUに最新のモデルを複製します。 3. `forward`を実行し、各GPUからGPU 0に出力を送信し、損失を計算します。 4. GPU 0からすべてのGPUに損失を分散し、`backward`を実行します。 5. 各GPUからGPU 0に勾配を送信し、それらを平均化します。 DDPはバッチごとに行う通信は勾配の送信のみであり、一方、DPはバッチごとに5つの異なるデータ交換を行います。 DPはプロセス内でデータをPythonスレッドを介してコピーしますが、DDPは[torch.distributed](https://pytorch.org/docs/master/distributed.html)を介してデータをコピーします。 DPではGPU 0は他のGPUよりもはるかに多くの作業を行うため、GPUの未使用率が高くなります。 DDPは複数のマシン間で使用できますが、DPの場合はそうではありません。 DPとDDPの他にも違いがありますが、この議論には関係ありません。 これら2つのモードを深く理解したい場合、この[記事](https://www.telesens.co/2019/04/04/distributed-data-parallel-training-using-pytorch-on-aws/)を強くお勧めします。素晴らしいダイアグラムを含み、さまざまなハードウェアでの複数のベンチマークとプロファイラの出力を示し、知っておく必要があるすべての微妙なニュアンスを説明しています。 実際のベンチマークを見てみましょう: | Type | NVlink | Time | | :----- | ----- | ---: | | 2:DP | Y | 110s | | 2:DDP | Y | 101s | | 2:DDP | N | 131s | 解析: ここで、DPはNVlinkを使用したDDPに比べて約10%遅く、NVlinkを使用しないDDPに比べて約15%高速であることが示されています。 実際の違いは、各GPUが他のGPUと同期する必要があるデータの量に依存します。同期するデータが多いほど、遅いリンクが合計の実行時間を遅くする可能性が高くなります。 以下は完全なベンチマークコードと出力です: `NCCL_P2P_DISABLE=1`を使用して、対応するベンチマークでNVLink機能を無効にしました。 ```bash # DP rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ python examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 110.5948, 'train_samples_per_second': 1.808, 'epoch': 0.69} # DDP w/ NVlink rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 \ torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69} # DDP w/o NVlink rm -r /tmp/test-clm; NCCL_P2P_DISABLE=1 CUDA_VISIBLE_DEVICES=0,1 \ torchrun --nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py \ --model_name_or_path openai-community/gpt2 --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \ --do_train --output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200 {'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69} ``` ハードウェア: 2x TITAN RTX、各24GB + 2つのNVLink(`nvidia-smi topo -m`で `NV2`) ソフトウェア: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0` ## ZeRO Data Parallelism ZeROパワードデータ並列処理(ZeRO-DP)は、次の[ブログ投稿](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)のダイアグラムで説明されています。 ![DeepSpeed-Image-1](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png) これは理解が難しいかもしれませんが、実際にはこの概念は非常にシンプルです。これは通常の`DataParallel`(DP)ですが、完全なモデルパラメータ、勾配、およびオプティマイザの状態を複製する代わりに、各GPUはそれぞれのスライスのみを保存します。そして、実行時に、特定のレイヤーに必要な完全なレイヤーパラメータが必要な場合、すべてのGPUが同期して、お互いに不足している部分を提供します。それがすべてです。 3つのレイヤーからなる単純なモデルを考えてみましょう。各レイヤーには3つのパラメータがあります: ``` La | Lb | Lc ---|----|--- a0 | b0 | c0 a1 | b1 | c1 a2 | b2 | c2 ``` レイヤーLaには、重みa0、a1、およびa2があります。 3つのGPUがある場合、Sharded DDP(= Zero-DP)はモデルを3つのGPUに次のように分割します: ``` GPU0: La | Lb | Lc ---|----|--- a0 | b0 | c0 GPU1: La | Lb | Lc ---|----|--- a1 | b1 | c1 GPU2: La | Lb | Lc ---|----|--- a2 | b2 | c2 ``` 
これは、典型的なディープニューラルネットワーク(DNN)のダイアグラムを想像すると、テンソル並列処理と同様の水平スライスであるようなものです。垂直スライスは、異なるGPUに完全な層グループを配置する方法です。しかし、これは単なる出発点に過ぎません。 これから、各GPUは通常のデータ並列処理(DP)と同様に、通常のミニバッチを受け取ります: ``` x0 => GPU0 x1 => GPU1 x2 => GPU2 ``` 最初に、入力データはレイヤーLaに適用されます。 GPU0に焦点を当てましょう:x0は、その前向きパスを実行するためにa0、a1、a2のパラメータが必要ですが、GPU0にはa0しかありません。GPU1からa1を、GPU2からa2を受け取り、モデルの各部分をまとめます。 同様に、GPU1はミニバッチx1を受け取り、a1しか持っていませんが、a0とa2のパラメータが必要です。これらはGPU0とGPU2から取得します。 GPU2もx2を受け取ります。a0とa1はGPU0とGPU1から受け取り、a2とともに完全なテンソルを再構築します。 3つのGPUは完全なテンソルを再構築し、前向き計算が行われます。 計算が完了すると、不要になったデータは削除されます。計算中だけ使用され、再構築は事前にフェッチを使用して効率的に行われます。 そして、このプロセス全体がレイヤーLb、次に前向きでLc、そして逆方向でLc -> Lb -> Laに対して繰り返されます。 私にとって、これは効率的なグループでの重みの分散戦略のように聞こえます: 1. 人Aはテントを持っています。 2. 人Bはストーブを持っています。 3. 人Cは斧を持っています。 今、彼らは毎晩持っているものを共有し、他の人から持っていないものをもらい、朝には割り当てられたタイプのギアを詰めて旅を続けます。これがSharded DDP / Zero DPです。 この戦略を、各人が独自のテント、ストーブ、斧を持って運ばなければならないシンプルな戦略と比較してみてください。これがPyTorchのDataParallel(DPおよびDDP)です。 このトピックの文献を読む際に、以下の類義語に出会うかもしれません:Sharded、Partitioned。 ZeROがモデルの重みを分割する方法に注意を払うと、これはテンソルパラレリズムと非常に似ているように見えます。これは後で議論される垂直モデルパラレリズムとは異なり、各レイヤーの重みをパーティション/シャーディングします。 Implementations: - [DeepSpeed](https://www.deepspeed.ai/tutorials/zero/) ZeRO-DP stages 1+2+3 - [`transformers` integration](main_classes/trainer#trainer-integrations) ## Naive Model Parallelism (Vertical) and Pipeline Parallelism ナイーブモデルパラレリズム(MP)は、モデルの層を複数のGPUに分散させる方法です。このメカニズムは比較的単純で、希望する層を`.to()`メソッドを使用して特定のデバイスに切り替えるだけです。これにより、データがこれらの層を通過するたびに、データも層と同じデバイスに切り替えられ、残りの部分は変更されません。 私たちはこれを「垂直MP」と呼びます。なぜなら、ほとんどのモデルがどのように描かれるかを思い出すと、層を垂直にスライスするからです。たとえば、以下の図は8層のモデルを示しています: ``` =================== =================== | 0 | 1 | 2 | 3 | | 4 | 5 | 6 | 7 | =================== =================== gpu0 gpu1 ``` 我々は、モデルを垂直に2つに分割し、レイヤー0から3をGPU0に配置し、レイヤー4から7をGPU1に配置しました。 データがレイヤー0から1、1から2、2から3に移動する間は通常のモデルと同じです。しかし、データがレイヤー3からレイヤー4に移動する必要がある場合、GPU0からGPU1への移動が発生し、通信のオーバーヘッドが発生します。参加しているGPUが同じコンピュートノード(例:同じ物理マシン)にある場合、このコピーは非常に高速ですが、異なるコンピュートノード(例:複数のマシン)にある場合、通信のオーバーヘッドは大幅に増加する可能性があります。 その後、レイヤー4から5、6から7までは通常のモデルと同様に動作し、7番目のレイヤーが完了すると、データをしばしばレイヤー0に戻す必要があります(またはラベルを最後のレイヤーに送信します)。これで損失を計算し、オプティマイザが作業を開始できます。 問題点: - 主な欠点、およびなぜこれを「単純な」MPと呼ぶのかは、1つを除いてすべてのGPUがどんな瞬間でもアイドル状態であることです。したがって、4つのGPUを使用する場合、単純なMPは、1つのGPUのメモリ容量を4倍にするのとほぼ同じであり、ハードウェアの残りを無視します。さらに、データのコピーのオーバーヘッドがあることを忘れてはいけません。したがって、4枚の6GBのカードは、データのコピーのオーバーヘッドがない1枚の24GBのカードと同じサイズを収容できるでしょうが、後者はトレーニングをより迅速に完了します。ただし、たとえば40GBのカードがあり、45GBのモデルを収める必要がある場合、勾配とオプティマイザの状態のためにほとんど収めることができません。 - 共有の埋め込みは、GPU間でコピーする必要があるかもしれません。 パイプライン並列処理(PP)は、ほぼ単純なMPと同じですが、GPUがアイドル状態になる問題を解決し、入力バッチをマイクロバッチに分割し、パイプラインを人工的に作成することにより、異なるGPUが計算プロセスに同時に参加できるようにします。 以下は、[GPipe論文](https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html)からの図で、上部には単純なMP、下部にはPPが示されています: ![mp-pp](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-gpipe-bubble.png) この図から、PPがGPUがアイドル状態の領域である「バブル」を少なく持つことがわかります。アイドル状態の部分は「バブル」と呼ばれます。 図の両方の部分は、4つのGPUがパイプラインに参加している4の次元の並列性を示しています。つまり、4つのパイプステージF0、F1、F2、F3のフォワードパスがあり、逆順のバックワードパスB3、B2、B1、B0があります。 PPは調整する新しいハイパーパラメータを導入します。それは `chunks` で、同じパイプステージを通じて連続して送信されるデータのチャンクの数を定義します。たとえば、下の図では `chunks=4` が表示されています。GPU0はチャンク0、1、2、3(F0,0、F0,1、F0,2、F0,3)で同じフォワードパスを実行し、他のGPUが作業を開始し始めるのを待ってから、GPU0はチャンク3、2、1、0(B0,3、B0,2、B0,1、B0,0)で逆順パスを実行します。 注意すべきは、概念的にはこれが勾配蓄積ステップ(GAS)と同じコンセプトであることです。PyTorchは `chunks` を使用し、DeepSpeedは同じハイパーパラメータをGASと呼びます。 `chunks` 
の導入により、PPはマイクロバッチ(MBS)の概念を導入します。DPはグローバルデータバッチサイズをミニバッチに分割します。したがって、DPの次数が4で、グローバルバッチサイズが1024の場合、4つのミニバッチ(それぞれ256)に分割されます(1024/4)。そして、`chunks`(またはGAS)の数が32である場合、マイクロバッチサイズは8になります(256/32)。各パイプラインステージは1つのマイクロバッチで作業します。 DP + PPセットアップのグローバルバッチサイズを計算するには、`mbs*chunks*dp_degree`(`8*32*4=1024`)を行います。 図に戻りましょう。 `chunks=1` であれば、非効率な単純なMPになります。非常に大きな `chunks` 値を使用すると、非常に小さなマイクロバッチサイズになり、効率があまり高くないかもしれません。したがって、GPUの効率的な利用を最大化する値を見つけるために実験する必要があります。これは、バブルのサイズを最小限にすることに対応する、すべての参加GPUにわたる高い並行GPU利用を可能にするためです。 2つのソリューショングループがあります。従来のパイプラインAPIソリューションと、ユーザーのモデルを大幅に変更する必要があるより現代的なソリューションです。 従来のパイプラインAPIソリューション: - PyTorch - DeepSpeed - Megatron-LM 現代的なソリューション: - Varuna - Sagemaker 従来のパイプラインAPIソリューションの問題点: - モデルをかなり変更する必要があるため、Pipelineはモジュールの通常のフローを`nn.Sequential`シーケンスに再書き込む必要があり、モデルの設計を変更することが必要です。 - 現在、Pipeline APIは非常に制限的です。最初のパイプラインステージに渡されるPython変数のセットがある場合、回避策を見つける必要があります。現在、パイプラインインターフェースでは、唯一のテンソルまたはテンソルのタプルを入力と出力として要求しています。これらのテンソルはバッチサイズを最初の次元として持っている必要があります。パイプラインはミニバッチをマイクロバッチに分割します。可能な改善点については、こちらの議論が行われています:https://github.com/pytorch/pytorch/pull/50693 - パイプステージのレベルでの条件付き制御フローは不可能です。例えば、T5のようなエンコーダーデコーダーモデルは、条件付きエンコーダーステージを処理するために特別な回避策が必要です。 - 各レイヤーを配置する必要があるため、1つのモデルの出力が他のモデルの入力になるようにします。 VarunaとSageMakerとの実験はまだ行っていませんが、彼らの論文によれば、上記で述べた問題のリストを克服し、ユーザーのモデルにははるかに小さな変更しか必要としないと報告されています。 実装: - [Pytorch](https://pytorch.org/docs/stable/pipeline.html) (initial support in pytorch-1.8, and progressively getting improved in 1.9 and more so in 1.10). Some [examples](https://github.com/pytorch/pytorch/blob/master/benchmarks/distributed/pipeline/pipe.py) - [DeepSpeed](https://www.deepspeed.ai/tutorials/pipeline/) - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) has an internal implementation - no API. - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://arxiv.org/abs/2111.05972) - this is a proprietary solution that can only be used on AWS. 
- [OSLO](https://github.com/tunib-ai/oslo) - この実装は、Hugging Face Transformersに基づいています。 🤗 Transformersのステータス: この執筆時点では、いずれのモデルも完全なPP(パイプライン並列処理)をサポートしていません。GPT2モデルとT5モデルは単純なMP(モデル並列処理)サポートを持っています。主な障害は、モデルを`nn.Sequential`に変換できず、すべての入力がテンソルである必要があることです。現在のモデルには、変換を非常に複雑にする多くの機能が含まれており、これらを削除する必要があります。 他のアプローチ: DeepSpeed、Varuna、およびSageMakerは、[交互にパイプラインを実行](https://docs.aws.amazon.com/sagemaker/latest/dg/model-parallel-core-features.html)するコンセプトを使用しています。ここでは、バックワードパスを優先させてバブル(アイドル時間)をさらに最小限に抑えます。 Varunaは、最適なスケジュールを発見するためにシミュレーションを使用してスケジュールをさらに改善しようとします。 OSLOは、`nn.Sequential`の変換なしでTransformersに基づくパイプライン並列処理を実装しています。 ## Tensor Parallelism テンソル並列処理では、各GPUがテンソルのスライスのみを処理し、全体が必要な操作のためにのみ完全なテンソルを集約します。 このセクションでは、[Megatron-LM](https://github.com/NVIDIA/Megatron-LM)論文からのコンセプトと図を使用します:[GPUクラスタでの効率的な大規模言語モデルトレーニング](https://arxiv.org/abs/2104.04473)。 どのトランスフォーマの主要な構築要素は、完全に接続された`nn.Linear`に続く非線形アクティベーション`GeLU`です。 Megatronの論文の表記法に従って、行列の乗算部分を`Y = GeLU(XA)`と書くことができます。ここで、`X`と`Y`は入力ベクトルと出力ベクトルで、`A`は重み行列です。 行列の計算を行列形式で見ると、行列乗算を複数のGPUで分割できる方法が簡単に理解できます: ![Parallel GEMM](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_gemm.png) 重み行列`A`を`N`個のGPUに対して列ごとに分割し、並列で行列乗算`XA_1`から`XA_n`を実行すると、`N`個の出力ベクトル`Y_1、Y_2、...、Y_n`が得られ、それらを独立して`GeLU`に供給できます: ![独立したGeLU](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-independent-gelu.png) この原理を使用して、最後まで同期が必要ないまま、任意の深さのMLPを更新できます。Megatron-LMの著者はそのための有用なイラストを提供しています: ![並列シャード処理](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_shard_processing.png) マルチヘッドアテンションレイヤーを並列化することはさらに簡単です。それらは既に複数の独立したヘッドを持っているため、本質的に並列です! ![並列セルフアテンション](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-tp-parallel_self_attention.png) 特別な考慮事項:TPには非常に高速なネットワークが必要であり、したがって1つのノードを超えてTPを実行しないことがお勧めされません。実際には、1つのノードに4つのGPUがある場合、最大のTP度数は4です。TP度数8が必要な場合は、少なくとも8つのGPUを持つノードを使用する必要があります。 このセクションは、元のより詳細な[TPの概要](https://github.com/huggingface/transformers/issues/10321#issuecomment-783543530)に基づいています。 by [@anton-l](https://github.com/anton-l)。 SageMakerは、より効率的な処理のためにTPとDPを組み合わせて使用します。 代替名: - [DeepSpeed](https://github.com/microsoft/DeepSpeed)はこれを「テンソルスライシング」と呼びます。詳細は[DeepSpeedの特徴](https://www.deepspeed.ai/training/#model-parallelism)をご覧ください。 実装例: - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM)には、モデル固有の内部実装があります。 - [parallelformers](https://github.com/tunib-ai/parallelformers)(現時点では推論のみ)。 - [SageMaker](https://arxiv.org/abs/2111.05972) - これはAWSでのみ使用できるプロプライエタリなソリューションです。 - [OSLO](https://github.com/tunib-ai/oslo)には、Transformersに基づいたテンソル並列実装があります。 🤗 Transformersの状況: - コア: まだコアには実装されていません。 - ただし、推論が必要な場合、[parallelformers](https://github.com/tunib-ai/parallelformers)はほとんどのモデルに対してサポートを提供します。これがコアに実装されるまで、これを使用できます。そして、トレーニングモードもサポートされることを期待しています。 - Deepspeed-Inferenceでは、BERT、GPT-2、およびGPT-NeoモデルをCUDAカーネルベースの高速推論モードでサポートしています。詳細は[こちら](https://www.deepspeed.ai/tutorials/inference-tutorial/)をご覧ください。 ## DP+PP DeepSpeedの[パイプラインチュートリアル](https://www.deepspeed.ai/tutorials/pipeline/)からの次の図は、DPをPPと組み合わせる方法を示しています。 ![dp-pp-2d](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero-dp-pp.png) ここで重要なのは、DPランク0がGPU2を見えなくし、DPランク1がGPU3を見えなくすることです。DPにとって、存在するのはGPU 0 と 1 のみで、それらの2つのGPUのようにデータを供給します。GPU0はPPを使用してGPU2に一部の負荷を「秘密裏に」オフロードし、GPU1も同様にGPU3を支援に引き入れます。 各次元には少なくとも2つのGPUが必要ですので、ここでは少なくとも4つのGPUが必要です。 実装例: - [DeepSpeed](https://github.com/microsoft/DeepSpeed) - 
[Megatron-LM](https://github.com/NVIDIA/Megatron-LM) - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://arxiv.org/abs/2111.05972) - [OSLO](https://github.com/tunib-ai/oslo) 🤗 Transformersの状況: まだ実装されていません ## DP+PP+TP さらに効率的なトレーニングを行うために、3Dパラレリズムを使用し、PPをTPとDPと組み合わせます。これは次の図で示されています。 ![dp-pp-tp-3d](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-deepspeed-3d.png) この図は[3Dパラレリズム:兆パラメータモデルへのスケーリング](https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/)というブログ投稿から取得されたもので、おすすめの読み物です。 各次元には少なくとも2つのGPUが必要ですので、ここでは少なくとも8つのGPUが必要です。 実装例: - [DeepSpeed](https://github.com/microsoft/DeepSpeed) - DeepSpeedには、さらに効率的なDPであるZeRO-DPと呼ばれるものも含まれています。 - [Megatron-LM](https://github.com/NVIDIA/Megatron-LM) - [Varuna](https://github.com/microsoft/varuna) - [SageMaker](https://arxiv.org/abs/2111.05972) - [OSLO](https://github.com/tunib-ai/oslo) 🤗 Transformersの状況: まだ実装されていません。PPとTPがないため。 ## ZeRO DP+PP+TP DeepSpeedの主要な機能の1つはZeROで、これはDPの拡張機能です。これについてはすでに「ZeROデータ並列化」で説明されています。通常、これは単独で動作する機能で、PPやTPは必要ありません。しかし、PPとTPと組み合わせることもできます。 ZeRO-DPがPPと組み合わされる場合、通常はZeROステージ1(オプティマイザーシャーディング)のみが有効になります。 ZeROステージ2(勾配シャーディング)をパイプライン並列化と組み合わせて使用する理論的な可能性はありますが、性能に悪影響を及ぼします。各マイクロバッチごとに勾配をシャーディングする前に、勾配を集約するための追加のリダクションスキャッター集計が必要で、通信オーバーヘッドが発生する可能性があります。パイプライン並列化の性質上、小さなマイクロバッチが使用され、計算の集中度(マイクロバッチサイズ)をバランスにかけ、パイプラインバブル(マイクロバッチ数)を最小限に抑えることに焦点が当てられています。したがって、これらの通信コストは影響を及ぼすでしょう。 さらに、PPには通常よりも少ない層が含まれており、メモリの節約はそれほど大きくありません。PPは既に勾配サイズを「1/PP」に削減するため、勾配シャーディングの節約は純粋なDPよりもはるかに重要ではありません。 ZeROステージ3も同様の理由で適していません - より多くのノード間通信が必要です。 そして、ZeROを持っているので、もう一つの利点はZeRO-Offloadです。これはステージ1オプティマイザーステートをCPUにオフロードできます。 実装例: - [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed)と[BigScienceからのMegatron-Deepspeed](https://github.com/bigscience-workshop/Megatron-DeepSpeed)は、前者のリポジトリのフォークです。 - [OSLO](https://github.com/tunib-ai/oslo) 重要な論文: - [DeepSpeedとMegatronを使用したMegatron-Turing NLG 530Bのトレーニング](https://arxiv.org/abs/2201.11990) 🤗 Transformersの状況: まだ実装されていません。PPとTPがないため。 ## FlexFlow [FlexFlow](https://github.com/flexflow/FlexFlow)は、わずかに異なるアプローチで並列化の問題を解決します。 論文: [Zhihao Jia、Matei Zaharia、Alex Aikenによる "Deep Neural Networksのデータとモデルの並列化を超えて"](https://arxiv.org/abs/1807.05358) FlexFlowは、サンプル-オペレータ-属性-パラメータの4D並列化を行います。 1. サンプル = データ並列化(サンプル単位の並列化) 2. オペレータ = 単一の操作をいくつかのサブ操作に並列化 3. 属性 = データ並列化(長さ方向の並列化) 4. パラメータ = モデル並列化(次元に関係なく、水平または垂直) 例: * サンプル シーケンス長512の10バッチを考えてみましょう。これらをサンプル次元で2つのデバイスに並列化すると、10 x 512が5 x 2 x 512になります。 * オペレータ 層正規化を行う場合、まずstdを計算し、次にmeanを計算し、データを正規化できます。オペレータの並列化により、stdとmeanを並列に計算できます。したがって、オペレータ次元で2つのデバイス(cuda:0、cuda:1)に並列化すると、最初に入力データを両方のデバイスにコピーし、cuda:0でstdを計算し、cuda:1でmeanを同時に計算します。 * 属性 10バッチの512長があります。これらを属性次元で2つのデバイスに並列化すると、10 x 512が10 x 2 x 256になります。 * パラメータ これはテンソルモデルの並列化または単純な層ごとのモデルの並列化と似ています。 このフレームワークの重要性は、(1)GPU/TPU/CPU対(2)RAM/DRAM対(3)高速内部接続/低速外部接続などのリソースを取り、これらすべてをアルゴリズムによって自動的に最適化することです。どの並列化をどこで使用するかをアルゴリズム的に決定します。 非常に重要な側面の1つは、FlexFlowは静的で固定のワークロードを持つモデルのために設計されており、動的な動作を持つモデルはイテレーションごとに異なる並列化戦略を好む場合があることです。 したがって、このフレームワークの約束は非常に魅力的です。選択したクラスタで30分間のシミュレーションを実行し、この特定の環境を最適に利用するための最良の戦略を提供します。部分を追加/削除/置換すると、それに対して実行して再最適化プランを作成します。その後、トレーニングできます。異なるセットアップには独自の最適化があります。 🤗 Transformersの現在の状況: まだ統合されていません。すでに[transformers.utils.fx](https://github.com/huggingface/transformers/blob/master/src/transformers/utils/fx.py)を使用してモデルがFXトレース可能であるため、FlexFlowを動作させるために必要な手順を誰かが見つける必要があります。 ## Which Strategy To Use When ここでは、どの並列化戦略をいつ使用するかの非常におおまかなアウトラインを示します。各リストの最初が通常よりも速いことが一般的です。 **⇨ 単一GPU** * モデルが単一GPUに収まる場合: 1. 
通常の使用 * モデルが単一GPUに収まらない場合: 1. ZeRO + CPUをオフロードし、オプションでNVMeをオフロード 2. 上記に加えて、最大のレイヤーが単一GPUに収まらない場合、[Memory Centric Tiling](https://deepspeed.readthedocs.io/en/latest/zero3.html#memory-centric-tiling)(詳細は以下参照)を有効化 * 最大のレイヤーが単一GPUに収まらない場合: 1. ZeROを使用しない場合 - TPを有効化する必要があります。なぜなら、PPだけでは収めることができないからです。 2. ZeROを使用する場合は、上記の「単一GPU」のエントリと同じものを参照してください **⇨ 単一ノード/マルチGPU** * モデルが単一GPUに収まる場合: 1. DDP - 分散データ並列 2. ZeRO - 状況と使用される構成に依存して速いかどうかが異なることがあります * モデルが単一GPUに収まらない場合: 1. PP 2. ZeRO 3. TP 非常に高速なノード内接続がNVLINKまたはNVSwitchである場合、これらのすべてはほとんど同等の性能です。これらがない場合、PPはTPまたはZeROよりも速くなります。TPの度合いも違いを生じるかもしれません。特定のセットアップで勝者を見つけるために実験するのが最善です。 TPはほとんど常に単一ノード内で使用されます。つまり、TPサイズ <= ノードあたりのGPUです。 * 最大のレイヤーが単一GPUに収まらない場合: 1. ZeROを使用しない場合 - TPを使用する必要があります。なぜなら、PPだけでは収めることができないからです。 2. ZeROを使用する場合は、上記の「単一GPU」のエントリと同じものを参照してください **⇨ マルチノード/マルチGPU** * 高速なノード間接続がある場合: 1. ZeRO - モデルへのほとんどの変更が不要です 2. PP+TP+DP - 通信が少なく、モデルに大規模な変更が必要です * 遅いノード間接続があり、GPUメモリが少ない場合: 1. DP+PP+TP+ZeRO-1
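参考までに、本文の「Naive Model Parallelism (Vertical) and Pipeline Parallelism」で説明した、`.to()` によるレイヤー単位の垂直分割(ナイーブ MP)を最小限のコードで示します(2 つの CUDA デバイスが利用可能であることを仮定した説明用のスケッチで、パイプライン化は行っていません)。

```python
import torch
import torch.nn as nn


class TwoStageModel(nn.Module):
    def __init__(self):
        super().__init__()
        # first half of the layers on GPU 0, second half on GPU 1
        self.stage1 = nn.Sequential(nn.Linear(512, 512), nn.ReLU()).to("cuda:0")
        self.stage2 = nn.Sequential(nn.Linear(512, 512), nn.ReLU()).to("cuda:1")

    def forward(self, x):
        x = self.stage1(x.to("cuda:0"))
        # copy the activations to the next device at the stage boundary
        # (this is where the communication overhead occurs)
        x = x.to("cuda:1")
        return self.stage2(x)


model = TwoStageModel()
output = model(torch.randn(8, 512))
print(output.device)  # cuda:1
```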
transformers/docs/source/ja/perf_train_gpu_many.md/0
{ "file_path": "transformers/docs/source/ja/perf_train_gpu_many.md", "repo_id": "transformers", "token_count": 18222 }
262
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Automatic speech recognition [[open-in-colab]] <Youtube id="TksaY_FDgnk"/> 自動音声認識 (ASR) は音声信号をテキストに変換し、一連の音声入力をテキスト出力にマッピングします。 Siri や Alexa などの仮想アシスタントは ASR モデルを使用してユーザーを日常的に支援しており、ライブキャプションや会議中のメモ取りなど、他にも便利なユーザー向けアプリケーションが数多くあります。 このガイドでは、次の方法を説明します。 1. [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) データセットの [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) を微調整して、音声をテキストに書き起こします。 2. 微調整したモデルを推論に使用します。 <Tip> このチュートリアルで説明するタスクは、次のモデル アーキテクチャでサポートされています。 <!--This tip is automatically generated by `make fix-copies`, do not fill manually!--> [Data2VecAudio](../model_doc/data2vec-audio), [Hubert](../model_doc/hubert), [M-CTC-T](../model_doc/mctct), [SEW](../model_doc/sew), [SEW-D](../model_doc/sew-d), [UniSpeech](../model_doc/unispeech), [UniSpeechSat](../model_doc/unispeech-sat), [Wav2Vec2](../model_doc/wav2vec2), [Wav2Vec2-Conformer](../model_doc/wav2vec2-conformer), [WavLM](../model_doc/wavlm) <!--End of the generated tip--> </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install transformers datasets evaluate jiwer ``` モデルをアップロードしてコミュニティと共有できるように、Hugging Face アカウントにログインすることをお勧めします。プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load MInDS-14 dataset まず、🤗 データセット ライブラリから [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) データセットの小さいサブセットをロードします。これにより、完全なデータセットのトレーニングにさらに時間を費やす前に、実験してすべてが機能することを確認する機会が得られます。 ```py >>> from datasets import load_dataset, Audio >>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train[:100]") ``` [`~Dataset.train_test_split`] メソッドを使用して、データセットの `train` 分割をトレイン セットとテスト セットに分割します。 ```py >>> minds = minds.train_test_split(test_size=0.2) ``` 次に、データセットを見てみましょう。 ```py >>> minds DatasetDict({ train: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 16 }) test: Dataset({ features: ['path', 'audio', 'transcription', 'english_transcription', 'intent_class', 'lang_id'], num_rows: 4 }) }) ``` データセットには`lang_id`や`english_transcription`などの多くの有用な情報が含まれていますが、このガイドでは「`audio`」と「`transciption`」に焦点を当てます。 [`~datasets.Dataset.remove_columns`] メソッドを使用して他の列を削除します。 ```py >>> minds = minds.remove_columns(["english_transcription", "intent_class", "lang_id"]) ``` もう一度例を見てみましょう。 ```py >>> minds["train"][0] {'audio': {'array': array([-0.00024414, 0. , 0. 
, ..., 0.00024414, 0.00024414, 0.00024414], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 8000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` 次の 2 つのフィールドがあります。 - `audio`: 音声ファイルをロードしてリサンプリングするために呼び出す必要がある音声信号の 1 次元の `array`。 - `transcription`: ターゲットテキスト。 ## Preprocess 次のステップでは、Wav2Vec2 プロセッサをロードしてオーディオ信号を処理します。 ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base") ``` MInDS-14 データセットのサンプリング レートは 8000kHz です (この情報は [データセット カード](https://huggingface.co/datasets/PolyAI/minds14) で確認できます)。つまり、データセットを再サンプリングする必要があります。事前トレーニングされた Wav2Vec2 モデルを使用するには、16000kHz に設定します。 ```py >>> minds = minds.cast_column("audio", Audio(sampling_rate=16_000)) >>> minds["train"][0] {'audio': {'array': array([-2.38064706e-04, -1.58618059e-04, -5.43987835e-06, ..., 2.78103951e-04, 2.38446111e-04, 1.18740834e-04], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'sampling_rate': 16000}, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~APP_ERROR/602ba9e2963e11ccd901cd4f.wav', 'transcription': "hi I'm trying to use the banking app on my phone and currently my checking and savings account balance is not refreshing"} ``` 上の `transcription` でわかるように、テキストには大文字と小文字が混在しています。 Wav2Vec2 トークナイザーは大文字のみでトレーニングされるため、テキストがトークナイザーの語彙と一致することを確認する必要があります。 ```py >>> def uppercase(example): ... return {"transcription": example["transcription"].upper()} >>> minds = minds.map(uppercase) ``` 次に、次の前処理関数を作成します。 1. `audio`列を呼び出して、オーディオ ファイルをロードしてリサンプリングします。 2. オーディオ ファイルから `input_values` を抽出し、プロセッサを使用して `transcription` 列をトークン化します。 ```py >>> def prepare_dataset(batch): ... audio = batch["audio"] ... batch = processor(audio["array"], sampling_rate=audio["sampling_rate"], text=batch["transcription"]) ... batch["input_length"] = len(batch["input_values"][0]) ... return batch ``` データセット全体に前処理関数を適用するには、🤗 Datasets [`~datasets.Dataset.map`] 関数を使用します。 `num_proc` パラメータを使用してプロセスの数を増やすことで、`map` を高速化できます。 [`~datasets.Dataset.remove_columns`] メソッドを使用して、不要な列を削除します。 ```py >>> encoded_minds = minds.map(prepare_dataset, remove_columns=minds.column_names["train"], num_proc=4) ``` 🤗 Transformers には ASR 用のデータ照合器がないため、[`DataCollat​​orWithPadding`] を調整してサンプルのバッチを作成する必要があります。また、テキストとラベルが (データセット全体ではなく) バッチ内の最も長い要素の長さに合わせて動的に埋め込まれ、均一な長さになります。 `padding=True` を設定すると、`tokenizer` 関数でテキストを埋め込むことができますが、動的な埋め込みの方が効率的です。 他のデータ照合器とは異なり、この特定のデータ照合器は、`input_values`と `labels`」に異なるパディング方法を適用する必要があります。 ```py >>> import torch >>> from dataclasses import dataclass, field >>> from typing import Any, Dict, List, Optional, Union >>> @dataclass ... class DataCollatorCTCWithPadding: ... processor: AutoProcessor ... padding: Union[bool, str] = "longest" ... def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: ... # split inputs and labels since they have to be of different lengths and need ... # different padding methods ... 
input_features = [{"input_values": feature["input_values"][0]} for feature in features] ... label_features = [{"input_ids": feature["labels"]} for feature in features] ... batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt") ... labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt") ... # replace padding with -100 to ignore loss correctly ... labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) ... batch["labels"] = labels ... return batch ``` 次に、`DataCollat​​orForCTCWithPadding` をインスタンス化します。 ```py >>> data_collator = DataCollatorCTCWithPadding(processor=processor, padding="longest") ``` ## Evaluate トレーニング中にメトリクスを含めると、多くの場合、モデルのパフォーマンスを評価するのに役立ちます。 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) ライブラリを使用して、評価メソッドをすばやくロードできます。このタスクでは、[単語エラー率](https://huggingface.co/spaces/evaluate-metric/wer) (WER) メトリクスを読み込みます (🤗 Evaluate [クイック ツアー](https://huggingface.co/docs/evaluate/a_quick_tour) を参照して、メトリクスをロードして計算する方法の詳細を確認してください)。 ```py >>> import evaluate >>> wer = evaluate.load("wer") ``` 次に、予測とラベルを [`~evaluate.EvaluationModule.compute`] に渡して WER を計算する関数を作成します。 ```py >>> import numpy as np >>> def compute_metrics(pred): ... pred_logits = pred.predictions ... pred_ids = np.argmax(pred_logits, axis=-1) ... pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id ... pred_str = processor.batch_decode(pred_ids) ... label_str = processor.batch_decode(pred.label_ids, group_tokens=False) ... wer = wer.compute(predictions=pred_str, references=label_str) ... return {"wer": wer} ``` これで`compute_metrics`関数の準備が整いました。トレーニングをセットアップするときにこの関数に戻ります。 ## Train <frameworkcontent> <pt> <Tip> [`Trainer`] を使用したモデルの微調整に慣れていない場合は、[ここ](../training#train-with-pytorch-trainer) の基本的なチュートリアルをご覧ください。 </Tip> これでモデルのトレーニングを開始する準備が整いました。 [`AutoModelForCTC`] で Wav2Vec2 をロードします。 `ctc_loss_reduction` パラメータで適用する削減を指定します。多くの場合、デフォルトの合計ではなく平均を使用する方が適切です。 ```py >>> from transformers import AutoModelForCTC, TrainingArguments, Trainer >>> model = AutoModelForCTC.from_pretrained( ... "facebook/wav2vec2-base", ... ctc_loss_reduction="mean", ... pad_token_id=processor.tokenizer.pad_token_id, ... ) ``` この時点で残っている手順は次の 3 つだけです。 1. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。唯一の必須パラメータは、モデルの保存場所を指定する `output_dir` です。 `push_to_hub=True`を設定して、このモデルをハブにプッシュします (モデルをアップロードするには、Hugging Face にサインインする必要があります)。各エポックの終了時に、[`トレーナー`] は WER を評価し、トレーニング チェックポイントを保存します。 2. トレーニング引数を、モデル、データセット、トークナイザー、データ照合器、および `compute_metrics` 関数とともに [`Trainer`] に渡します。 3. [`~Trainer.train`] を呼び出してモデルを微調整します。 ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_asr_mind_model", ... per_device_train_batch_size=8, ... gradient_accumulation_steps=2, ... learning_rate=1e-5, ... warmup_steps=500, ... max_steps=2000, ... gradient_checkpointing=True, ... fp16=True, ... group_by_length=True, ... evaluation_strategy="steps", ... per_device_eval_batch_size=8, ... save_steps=1000, ... eval_steps=1000, ... logging_steps=25, ... load_best_model_at_end=True, ... metric_for_best_model="wer", ... greater_is_better=False, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=encoded_minds["train"], ... eval_dataset=encoded_minds["test"], ... tokenizer=processor, ... data_collator=data_collator, ... compute_metrics=compute_metrics, ... 
) >>> trainer.train() ``` トレーニングが完了したら、 [`~transformers.Trainer.push_to_hub`] メソッドを使用してモデルをハブに共有し、誰もがモデルを使用できるようにします。 ```py >>> trainer.push_to_hub() ``` </pt> </frameworkcontent> <Tip> 自動音声認識用にモデルを微調整する方法のより詳細な例については、英語 ASR および英語のこのブログ [投稿](https://huggingface.co/blog/fine-tune-wav2vec2-english) を参照してください。多言語 ASR については、この [投稿](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) を参照してください。 </Tip> ## Inference モデルを微調整したので、それを推論に使用できるようになりました。 推論を実行したい音声ファイルをロードします。必要に応じて、オーディオ ファイルのサンプリング レートをモデルのサンプリング レートと一致するようにリサンプリングすることを忘れないでください。 ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> sampling_rate = dataset.features["audio"].sampling_rate >>> audio_file = dataset[0]["audio"]["path"] ``` 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。モデルを使用して自動音声認識用の`pipeline`をインスタンス化し、オーディオ ファイルをそれに渡します。 ```py >>> from transformers import pipeline >>> transcriber = pipeline("automatic-speech-recognition", model="stevhliu/my_awesome_asr_minds_model") >>> transcriber(audio_file) {'text': 'I WOUD LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'} ``` <Tip> 転写はまあまあですが、もっと良くなる可能性があります。さらに良い結果を得るには、より多くの例でモデルを微調整してみてください。 </Tip> 必要に応じて、「パイプライン」の結果を手動で複製することもできます。 <frameworkcontent> <pt> プロセッサをロードしてオーディオ ファイルと文字起こしを前処理し、`input`を PyTorch テンソルとして返します。 ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") ``` Pass your inputs to the model and return the logits: ```py >>> from transformers import AutoModelForCTC >>> model = AutoModelForCTC.from_pretrained("stevhliu/my_awesome_asr_mind_model") >>> with torch.no_grad(): ... logits = model(**inputs).logits ``` 最も高い確率で予測された `input_ids` を取得し、プロセッサを使用して予測された `input_ids` をデコードしてテキストに戻します。 ```py >>> import torch >>> predicted_ids = torch.argmax(logits, dim=-1) >>> transcription = processor.batch_decode(predicted_ids) >>> transcription ['I WOUL LIKE O SET UP JOINT ACOUNT WTH Y PARTNER'] ``` </pt> </frameworkcontent>
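なお、上記の `compute_metrics` 関数は、グローバルに読み込んだ `wer` メトリクスと同じ名前のローカル変数 `wer` に代入しているため、そのままでは `UnboundLocalError` が発生する可能性があります。以下は、変数名を分けた修正版のスケッチです(`processor` は本文で定義したものをそのまま使う想定です)。

```python
import numpy as np
import evaluate

wer_metric = evaluate.load("wer")


def compute_metrics(pred):
    pred_logits = pred.predictions
    pred_ids = np.argmax(pred_logits, axis=-1)

    # replace -100 with the pad token id so the labels can be decoded
    pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

    pred_str = processor.batch_decode(pred_ids)
    label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

    wer = wer_metric.compute(predictions=pred_str, references=label_str)
    return {"wer": wer}
```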
transformers/docs/source/ja/tasks/asr.md/0
{ "file_path": "transformers/docs/source/ja/tasks/asr.md", "repo_id": "transformers", "token_count": 7201 }
263
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Transformers Agents <Tip warning={true}> Transformers Agentsは、いつでも変更される可能性のある実験的なAPIです。エージェントが返す結果は、APIまたは基礎となるモデルが変更される可能性があるため、異なることがあります。 </Tip> Transformersバージョンv4.29.0は、*ツール*と*エージェント*のコンセプトを基に構築されています。この[colab](https://colab.research.google.com/drive/1c7MHD-T1forUPGcC_jlwsIptOzpG3hSj)で試すことができます。 要するに、これはtransformersの上に自然言語APIを提供するものです:私たちは一連の厳選されたツールを定義し、自然言語を解釈し、これらのツールを使用するエージェントを設計します。これは設計上拡張可能です。私たちはいくつかの関連するツールを厳選しましたが、コミュニティによって開発された任意のツールを使用するためにシステムを簡単に拡張できる方法も示します。 この新しいAPIで何ができるかのいくつかの例から始めましょう。特に多モーダルなタスクに関して強力ですので、画像を生成したりテキストを読み上げたりするのに最適です。 上記のテキストの上に、日本語の翻訳を提供します。 ```py agent.run("Caption the following image", image=image) ``` | **Input** | **Output** | |-----------------------------------------------------------------------------------------------------------------------------|-----------------------------------| | <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/beaver.png" width=200> | A beaver is swimming in the water | --- ```py agent.run("Read the following text out loud", text=text) ``` | **Input** | **Output** | |-------------------------------------------------------------------------------------------------------------------------|----------------------------------------------| | A beaver is swimming in the water | <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tts_example.wav" type="audio/wav"> your browser does not support the audio element. 
</audio> --- ```py agent.run( "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?", document=document, ) ``` | **Input** | **Output** | |-----------------------------------------------------------------------------------------------------------------------------|----------------| | <img src="https://datasets-server.huggingface.co/assets/hf-internal-testing/example-documents/--/hf-internal-testing--example-documents/test/0/image/image.jpg" width=200> | ballroom foyer | ## Quickstart `agent.run`を使用する前に、エージェントをインスタンス化する必要があります。エージェントは、大規模な言語モデル(LLM)です。 OpenAIモデルとBigCode、OpenAssistantからのオープンソースの代替モデルをサポートしています。OpenAIモデルはパフォーマンスが優れていますが、OpenAIのAPIキーが必要であり、無料で使用することはできません。一方、Hugging FaceはBigCodeとOpenAssistantモデルのエンドポイントへの無料アクセスを提供しています。 まず、デフォルトの依存関係をすべてインストールするために`agents`のエクストラをインストールしてください。 ```bash pip install transformers[agents] ``` OpenAIモデルを使用するには、`openai`の依存関係をインストールした後、`OpenAiAgent`をインスタンス化します。 ```bash pip install openai ``` ```py from transformers import OpenAiAgent agent = OpenAiAgent(model="text-davinci-003", api_key="<your_api_key>") ``` BigCodeまたはOpenAssistantを使用するには、まずログインしてInference APIにアクセスしてください。 ```py from huggingface_hub import login login("<YOUR_TOKEN>") ``` 次に、エージェントをインスタンス化してください。 ```py from transformers import HfAgent # Starcoder agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") # StarcoderBase # agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoderbase") # OpenAssistant # agent = HfAgent(url_endpoint="https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5") ``` これは、Hugging Faceが現在無料で提供している推論APIを使用しています。このモデル(または別のモデル)の独自の推論エンドポイントをお持ちの場合は、上記のURLエンドポイントをご自分のURLエンドポイントで置き換えることができます。 <Tip> StarCoderとOpenAssistantは無料で利用でき、シンプルなタスクには非常に優れた性能を発揮します。ただし、より複雑なプロンプトを処理する際には、チェックポイントが十分でないことがあります。そのような場合には、現時点ではオープンソースではないものの、パフォーマンスが向上する可能性のあるOpenAIモデルを試してみることをお勧めします。 </Tip> これで準備が整いました!これから、あなたが利用できる2つのAPIについて詳しく説明します。 ### Single execution (run) 単一実行メソッドは、エージェントの [`~Agent.run`] メソッドを使用する場合です。 ```py agent.run("Draw me a picture of rivers and lakes.") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200> これは、実行したいタスクに適したツール(またはツール)を自動的に選択し、適切に実行します。1つまたは複数のタスクを同じ命令で実行することができます(ただし、命令が複雑であるほど、エージェントが失敗する可能性が高くなります)。 ```py agent.run("Draw me a picture of the sea then transform the picture to add an island") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sea_and_island.png" width=200> <br/> [`~Agent.run`] 操作は独立して実行できますので、異なるタスクで何度も実行することができます。 注意点として、あなたの `agent` は単なる大規模な言語モデルであるため、プロンプトのわずかな変更でも完全に異なる結果が得られる可能性があります。したがって、実行したいタスクをできるだけ明確に説明することが重要です。良いプロンプトの書き方については、[こちら](custom_tools#writing-good-user-inputs) で詳しく説明しています。 実行ごとに状態を保持したり、テキスト以外のオブジェクトをエージェントに渡したりする場合は、エージェントが使用する変数を指定することができます。例えば、最初の川や湖の画像を生成し、その画像に島を追加するようにモデルに指示するには、次のように行うことができます: ```python picture = agent.run("Generate a picture of rivers and lakes.") updated_picture = agent.run("Transform the image in `picture` to add an island to it.", picture=picture) ``` <Tip> これは、モデルがあなたのリクエストを理解できない場合や、ツールを混同する場合に役立つことがあります。例えば: ```py agent.run("Draw me the picture of a capybara swimming in the sea") ``` ここでは、モデルは2つの方法で解釈できます: - `text-to-image`に海で泳ぐカピバラを生成させる - または、`text-to-image`でカピバラを生成し、それを海で泳がせるために`image-transformation`ツールを使用する 最初のシナリオを強制したい場合は、プロンプトを引数として渡すことができます: ```py agent.run("Draw me a picture of the `prompt`", prompt="a capybara 
swimming in the sea") ``` </Tip> ### Chat-based execution (チャット) エージェントは、[`~Agent.chat`] メソッドを使用することで、チャットベースのアプローチも可能です。 <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200> ```py agent.chat("Transform the picture so that there is a rock in there") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_and_beaver.png" width=200> <br/> これは、指示をまたいで状態を保持したい場合に便利なアプローチで、単一の指示に比べて複雑な指示を処理するのは難しいかもしれません(その場合は [`~Agent.run`] メソッドの方が適しています)。 このメソッドは、非テキスト型の引数や特定のプロンプトを渡したい場合にも使用できます。 ### ⚠️ Remote execution デモンストレーションの目的やすべてのセットアップで使用できるように、リリースのためにいくつかのデフォルトツール用のリモート実行ツールも作成しました。これらは [推論エンドポイント](https://huggingface.co/inference-endpoints) を使用して作成されます。 これらは現在オフになっていますが、リモート実行ツールを自分で設定する方法については、[カスタムツールガイド](./custom_tools) を読むことをお勧めします。 ### What's happening here? What are tools, and what are agents? ![エージェントとツールのダイアグラム](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/diagram.png) #### Agents ここでの「エージェント」とは、大規模な言語モデルのことであり、特定の一連のツールにアクセスできるようにプロンプトを設定しています。 LLM(大規模言語モデル)は、コードの小さなサンプルを生成するのにかなり優れており、このAPIは、エージェントに特定のツールセットを使用してタスクを実行するコードの小さなサンプルを生成させることに利用しています。このプロンプトは、エージェントにタスクとツールの説明を提供することで、エージェントが使用しているツールのドキュメントにアクセスし、関連するコードを生成できるようになります。 #### Tools ツールは非常に単純で、名前と説明からなる単一の関数です。それから、これらのツールの説明を使用してエージェントをプロンプトします。プロンプトを通じて、エージェントに、ツールを使用してクエリで要求されたタスクをどのように実行するかを示します。特に、ツールの期待される入力と出力を示します。 これは新しいツールを使用しており、パイプラインではなくツールを使用しています。なぜなら、エージェントは非常に原子的なツールでより良いコードを生成するからです。パイプラインはよりリファクタリングされ、しばしば複数のタスクを組み合わせています。ツールは非常に単純なタスクに焦点を当てることを意図しています。 #### Code-execution?! このコードは、ツールとツールと一緒に渡される入力のセットで、当社の小規模なPythonインタープリタで実行されます。すでに提供されたツールとprint関数しか呼び出すことができないため、実行できることはすでに制限されています。Hugging Faceのツールに制限されているため、安全だと考えても問題ありません。 さらに、属性の検索やインポートは許可しておらず(それらは渡された入力/出力を処理するためには必要ないはずです)、最も明らかな攻撃は問題ありません(エージェントにそれらを出力するようにプロンプトする必要があります)。超安全な側に立ちたい場合は、追加の引数 return_code=True を指定して run() メソッドを実行できます。その場合、エージェントは実行するコードを返すだけで、実行するかどうかはあなた次第です。 実行は、違法な操作を試みる行またはエージェントが生成したコードに通常のPythonエラーがある場合に停止します。 ### A curated set of tools 私たちは、このようなエージェントを強化できるツールのセットを特定します。以下は、`transformers`に統合されたツールの更新されたリストです: - **ドキュメント質問応答**: 画像形式のドキュメント(PDFなど)が与えられた場合、このドキュメントに関する質問に回答します([Donut](./model_doc/donut)) - **テキスト質問応答**: 長いテキストと質問が与えられた場合、テキスト内の質問に回答します([Flan-T5](./model_doc/flan-t5)) - **無条件の画像キャプション**: 画像にキャプションを付けます!([BLIP](./model_doc/blip)) - **画像質問応答**: 画像が与えられた場合、その画像に関する質問に回答します([VILT](./model_doc/vilt)) - **画像セグメンテーション**: 画像とプロンプトが与えられた場合、そのプロンプトのセグメンテーションマスクを出力します([CLIPSeg](./model_doc/clipseg)) - **音声からテキストへの変換**: 人の話し声のオーディオ録音が与えられた場合、その音声をテキストに転記します([Whisper](./model_doc/whisper)) - **テキストから音声への変換**: テキストを音声に変換します([SpeechT5](./model_doc/speecht5)) - **ゼロショットテキスト分類**: テキストとラベルのリストが与えられた場合、テキストが最も対応するラベルを識別します([BART](./model_doc/bart)) - **テキスト要約**: 長いテキストを1つまたは数文に要約します([BART](./model_doc/bart)) - **翻訳**: テキストを指定された言語に翻訳します([NLLB](./model_doc/nllb)) これらのツールはtransformersに統合されており、手動でも使用できます。たとえば、次のように使用できます: ```py from transformers import load_tool tool = load_tool("text-to-speech") audio = tool("This is a text to speech tool") ``` ### Custom tools 私たちは、厳選されたツールのセットを特定する一方、この実装が提供する主要な価値は、カスタムツールを迅速に作成して共有できる能力だと強く信じています。 ツールのコードをHugging Face Spaceまたはモデルリポジトリにプッシュすることで、エージェントと直接連携してツールを活用できます。[`huggingface-tools` organization](https://huggingface.co/huggingface-tools)には、**transformers非依存**のいくつかのツールが追加されました: - **テキストダウンローダー**: ウェブURLからテキストをダウンロードするためのツール - **テキストから画像へ**: プロンプトに従って画像を生成するためのツール。安定した拡散を活用します - **画像変換**: 
初期画像とプロンプトを指定して画像を変更するためのツール。instruct pix2pixの安定した拡散を活用します - **テキストからビデオへ**: プロンプトに従って小さなビデオを生成するためのツール。damo-vilabを活用します 最初から使用しているテキストから画像へのツールは、[*huggingface-tools/text-to-image*](https://huggingface.co/spaces/huggingface-tools/text-to-image)にあるリモートツールです!今後も、この組織および他の組織にさらにこのようなツールをリリースし、この実装をさらに強化していきます。 エージェントはデフォルトで[`huggingface-tools`](https://huggingface.co/huggingface-tools)にあるツールにアクセスできます。 ツールの作成と共有方法、またHubに存在するカスタムツールを活用する方法についての詳細は、[次のガイド](custom_tools)で説明しています。 ### Code generation これまで、エージェントを使用してあなたのためにアクションを実行する方法を示しました。ただし、エージェントはコードを生成するだけで、非常に制限されたPythonインタープリタを使用して実行します。生成されたコードを異なる環境で使用したい場合、エージェントにコードを返すように指示できます。ツールの定義と正確なインポートも含めて。 例えば、以下の命令: ```python agent.run("Draw me a picture of rivers and lakes", return_code=True) ``` 次のコードを返します ```python from transformers import load_tool image_generator = load_tool("huggingface-tools/text-to-image") image = image_generator(prompt="rivers and lakes") ``` その後、自分で変更して実行できます。
transformers/docs/source/ja/transformers_agents.md/0
{ "file_path": "transformers/docs/source/ja/transformers_agents.md", "repo_id": "transformers", "token_count": 7879 }
264
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # 사용자 정의 도구와 프롬프트[[custom-tools-and-prompts]] <Tip> Transformers와 관련하여 어떤 도구와 에이전트가 있는지 잘 모르신다면 [Transformers Agents](transformers_agents) 페이지를 먼저 읽어보시기 바랍니다. </Tip> <Tip warning={true}> Transformers Agents는 실험 중인 API로 언제든지 변경될 수 있습니다. API 또는 기반 모델이 변경되기 쉽기 때문에 에이전트가 반환하는 결과도 달라질 수 있습니다. </Tip> 에이전트에게 권한을 부여하고 새로운 작업을 수행하게 하려면 사용자 정의 도구와 프롬프트를 만들고 사용하는 것이 무엇보다 중요합니다. 이 가이드에서는 다음과 같은 내용을 살펴보겠습니다: - 프롬프트를 사용자 정의하는 방법 - 사용자 정의 도구를 사용하는 방법 - 사용자 정의 도구를 만드는 방법 ## 프롬프트를 사용자 정의하기[[customizing-the-prompt]] [Transformers Agents](transformers_agents)에서 설명한 것처럼 에이전트는 [`~Agent.run`] 및 [`~Agent.chat`] 모드에서 실행할 수 있습니다. `run`(실행) 모드와 `chat`(채팅) 모드 모두 동일한 로직을 기반으로 합니다. 에이전트를 구동하는 언어 모델은 긴 프롬프트에 따라 조건이 지정되고, 중지 토큰에 도달할 때까지 다음 토큰을 생성하여 프롬프트를 완수합니다. `chat` 모드에서는 프롬프트가 이전 사용자 입력 및 모델 생성으로 연장된다는 점이 두 모드의 유일한 차이점입니다. 이를 통해 에이전트가 과거 상호작용에 접근할 수 있게 되므로 에이전트에게 일종의 메모리를 제공하는 셈입니다. ### 프롬프트의 구조[[structure-of-the-prompt]] 어떻게 프롬프트 사용자 정의를 잘 할 수 있는지 이해하기 위해 프롬프트의 구조를 자세히 살펴봅시다. 프롬프트는 크게 네 부분으로 구성되어 있습니다. - 1. 도입: 에이전트가 어떻게 행동해야 하는지, 도구의 개념에 대한 설명. - 2. 모든 도구에 대한 설명. 이는 런타임에 사용자가 정의/선택한 도구로 동적으로 대체되는 `<<all_tools>>` 토큰으로 정의됩니다. - 3. 작업 예제 및 해당 솔루션 세트. - 4. 현재 예제 및 해결 요청. 각 부분을 더 잘 이해할 수 있도록 짧은 버전을 통해 `run` 프롬프트가 어떻게 보이는지 살펴보겠습니다: ````text I will ask you to perform a task, your job is to come up with a series of simple commands in Python that will perform the task. [...] You can print intermediate results if it makes sense to do so. Tools: - document_qa: This is a tool that answers a question about a document (pdf). It takes an input named `document` which should be the document containing the information, as well as a `question` that is the question about the document. It returns a text that contains the answer to the question. - image_captioner: This is a tool that generates a description of an image. It takes an input named `image` which should be the image to the caption and returns a text that contains the description in English. [...] Task: "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French." I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image. Answer: ```py translated_question = translator(question=question, src_lang="French", tgt_lang="English") print(f"The translated question is {translated_question}.") answer = image_qa(image=image, question=translated_question) print(f"The answer is {answer}") ``` Task: "Identify the oldest person in the `document` and create an image showcasing the result as a banner." I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Answer: ```py answer = document_qa(document, question="What is the oldest person?") print(f"The answer is {answer}.") image = image_generator("A banner showing " + answer) ``` [...] 
Task: "Draw me a picture of rivers and lakes" I will use the following ```` 도입(*"도구:"* 앞의 텍스트)에서는 모델이 어떻게 작동하고 무엇을 해야 하는지 정확하게 설명합니다. 에이전트는 항상 같은 방식으로 작동해야 하므로 이 부분은 사용자 정의할 필요가 없을 가능성이 높습니다. 두 번째 부분(*"도구"* 아래의 글머리 기호)은 `run` 또는 `chat`을 호출할 때 동적으로 추가됩니다. 정확히 `agent.toolbox`에 있는 도구 수만큼 글머리 기호가 있고, 각 글머리 기호는 도구의 이름과 설명으로 구성됩니다: ```text - <tool.name>: <tool.description> ``` 문서 질의응답 도구를 가져오고 이름과 설명을 출력해서 빠르게 확인해 보겠습니다. ```py from transformers import load_tool document_qa = load_tool("document-question-answering") print(f"- {document_qa.name}: {document_qa.description}") ``` 그러면 다음 결과가 출력됩니다: ```text - document_qa: This is a tool that answers a question about a document (pdf). It takes an input named `document` which should be the document containing the information, as well as a `question` that is the question about the document. It returns a text that contains the answer to the question. ``` 여기서 도구 이름이 짧고 정확하다는 것을 알 수 있습니다. 설명은 두 부분으로 구성되어 있는데, 첫 번째 부분에서는 도구의 기능을 설명하고 두 번째 부분에서는 예상되는 입력 인수와 반환 값을 명시합니다. 에이전트가 도구를 올바르게 사용하려면 좋은 도구 이름과 도구 설명이 매우 중요합니다. 에이전트가 도구에 대해 알 수 있는 유일한 정보는 이름과 설명뿐이므로, 이 두 가지를 정확하게 작성하고 도구 상자에 있는 기존 도구의 스타일과 일치하는지 확인해야 합니다. 특히 이름에 따라 예상되는 모든 인수가 설명에 코드 스타일로 언급되어 있는지, 예상되는 유형과 그 유형이 무엇인지에 대한 설명이 포함되어 있는지 확인하세요. <Tip> 도구에 어떤 이름과 설명이 있어야 하는지 이해하려면 엄선된 Transformers 도구의 이름과 설명을 확인하세요. [`Agent.toolbox`] 속성을 가진 모든 도구를 볼 수 있습니다. </Tip> 세 번째 부분에는 에이전트가 어떤 종류의 사용자 요청에 대해 어떤 코드를 생성해야 하는지 정확하게 보여주는 엄선된 예제 세트가 포함되어 있습니다. 에이전트를 지원하는 대규모 언어 모델은 프롬프트에서 패턴을 인식하고 새로운 데이터로 패턴을 반복하는 데 매우 능숙합니다. 따라서 에이전트가 실제로 올바른 실행 가능한 코드를 생성할 가능성을 극대화하는 방식으로 예제를 작성하는 것이 매우 중요합니다. 한 가지 예를 살펴보겠습니다: ````text Task: "Identify the oldest person in the `document` and create an image showcasing the result as a banner." I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Answer: ```py answer = document_qa(document, question="What is the oldest person?") print(f"The answer is {answer}.") image = image_generator("A banner showing " + answer) ``` ```` 작업 설명, 에이전트가 수행하려는 작업에 대한 설명, 마지막으로 생성된 코드, 이 세 부분으로 구성된 프롬프트는 모델에 반복하여 제공됩니다. 프롬프트의 일부인 모든 예제는 이러한 정확한 패턴으로 되어 있으므로, 에이전트가 새 토큰을 생성할 때 정확히 동일한 패턴을 재현할 수 있습니다. 프롬프트 예제는 Transformers 팀이 선별하고 일련의 [problem statements](https://github.com/huggingface/transformers/blob/main/src/transformers/tools/evaluate_agent.py)에 따라 엄격하게 평가하여 에이전트의 프롬프트가 에이전트의 실제 사용 사례를 최대한 잘 해결할 수 있도록 보장합니다. 프롬프트의 마지막 부분은 다음에 해당합니다: ```text Task: "Draw me a picture of rivers and lakes" I will use the following ``` 이는 에이전트가 완료해야 할 최종적인 미완성 예제입니다. 미완성 예제는 실제 사용자 입력에 따라 동적으로 만들어집니다. 위 예시의 경우 사용자가 다음과 같이 실행했습니다: ```py agent.run("Draw me a picture of rivers and lakes") ``` 사용자 입력 - *즉* Task: *"Draw me a picture of rivers and lakes"*가 프롬프트 템플릿에 맞춰 "Task: <task> \n\n I will use the following"로 캐스팅됩니다. 이 문장은 에이전트에게 조건이 적용되는 프롬프트의 마지막 줄을 구성하므로 에이전트가 이전 예제에서 수행한 것과 정확히 동일한 방식으로 예제를 완료하도록 강력하게 영향을 미칩니다. 너무 자세히 설명하지 않더라도 채팅 템플릿의 프롬프트 구조는 동일하지만 예제의 스타일이 약간 다릅니다. *예를 들면*: ````text [...] ===== Human: Answer the question in the variable `question` about the image stored in the variable `image`. Assistant: I will use the tool `image_qa` to answer the question on the input image. ```py answer = image_qa(text=question, image=image) print(f"The answer is {answer}") ``` Human: I tried this code, it worked but didn't give me a good result. The question is in French Assistant: In this case, the question needs to be translated first. I will use the tool `translator` to do this. 
```py translated_question = translator(question=question, src_lang="French", tgt_lang="English") print(f"The translated question is {translated_question}.") answer = image_qa(text=translated_question, image=image) print(f"The answer is {answer}") ``` ===== [...] ```` `run` 프롬프트의 예와는 반대로, 각 `chat` 프롬프트의 예에는 *Human(사람)*과 *Assistant(어시스턴트)* 간에 하나 이상의 교환이 있습니다. 모든 교환은 `run` 프롬프트의 예와 유사한 구조로 되어 있습니다. 사용자의 입력이 *Human:* 뒤에 추가되며, 에이전트에게 코드를 생성하기 전에 수행해야 할 작업을 먼저 생성하라는 메시지가 표시됩니다. 교환은 이전 교환을 기반으로 할 수 있으므로 위와 같이 사용자가 "**이** 코드를 시도했습니다"라고 입력하면 이전에 생성된 에이전트의 코드를 참조하여 과거 교환을 참조할 수 있습니다. `.chat`을 실행하면 사용자의 입력 또는 *작업*이 미완성된 양식의 예시로 캐스팅됩니다: ```text Human: <user-input>\n\nAssistant: ``` 그러면 에이전트가 이를 완성합니다. `run` 명령과 달리 `chat` 명령은 완료된 예제를 프롬프트에 추가하여 에이전트에게 다음 `chat` 차례에 대한 더 많은 문맥을 제공합니다. 이제 프롬프트가 어떻게 구성되어 있는지 알았으니 어떻게 사용자 정의할 수 있는지 살펴봅시다! ### 좋은 사용자 입력 작성하기[[writing-good-user-inputs]] 대규모 언어 모델이 사용자의 의도를 이해하는 능력이 점점 더 향상되고 있지만, 에이전트가 올바른 작업을 선택할 수 있도록 최대한 정확성을 유지하는 것은 큰 도움이 됩니다. 최대한 정확하다는 것은 무엇을 의미할까요? 에이전트는 프롬프트에서 도구 이름 목록과 해당 설명을 볼 수 있습니다. 더 많은 도구가 추가될수록 에이전트가 올바른 도구를 선택하기가 더 어려워지고 실행할 도구의 올바른 순서를 선택하는 것은 더욱 어려워집니다. 일반적인 실패 사례를 살펴보겠습니다. 여기서는 분석할 코드만 반환하겠습니다. ```py from transformers import HfAgent agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") agent.run("Show me a tree", return_code=True) ``` 그러면 다음 결과가 출력됩니다: ```text ==Explanation from the agent== I will use the following tool: `image_segmenter` to create a segmentation mask for the image. ==Code generated by the agent== mask = image_segmenter(image, prompt="tree") ``` 우리가 원했던 결과가 아닐 수도 있습니다. 대신 나무 이미지가 생성되기를 원할 가능성이 더 높습니다. 따라서 에이전트가 특정 도구를 사용하도록 유도하려면 도구의 이름과 설명에 있는 중요한 키워드를 사용하는 것이 매우 유용할 수 있습니다. 한번 살펴보겠습니다. ```py agent.toolbox["image_generator"].description ``` ```text 'This is a tool that creates an image according to a prompt, which is a text description. It takes an input named `prompt` which contains the image description and outputs an image. ``` 이름과 설명은 "image", "prompt", "create" 및 "generate" 키워드를 사용합니다. 이 단어들을 사용하면 더 잘 작동할 가능성이 높습니다. 프롬프트를 조금 더 구체화해 보겠습니다. ```py agent.run("Create an image of a tree", return_code=True) ``` 이 코드는 다음 프롬프트를 만들어냅니다: ```text ==Explanation from the agent== I will use the following tool `image_generator` to generate an image of a tree. ==Code generated by the agent== image = image_generator(prompt="tree") ``` 훨씬 낫네요! 저희가 원했던 것과 비슷해 보입니다. 즉, 에이전트가 작업을 올바른 도구에 올바르게 매핑하는 데 어려움을 겪고 있다면 도구 이름과 설명에서 가장 관련성이 높은 키워드를 찾아보고 이를 통해 작업 요청을 구체화해 보세요. ### 도구 설명 사용자 정의하기[[customizing-the-tool-descriptions]] 앞서 살펴본 것처럼 에이전트는 각 도구의 이름과 설명에 액세스할 수 있습니다. 기본 도구에는 매우 정확한 이름과 설명이 있어야 하지만 특정 사용 사례에 맞게 도구의 설명이나 이름을 변경하는 것이 도움이 될 수도 있습니다. 이는 매우 유사한 여러 도구를 추가했거나 특정 도메인(*예*: 이미지 생성 및 변환)에만 에이전트를 사용하려는 경우에 특히 중요해질 수 있습니다. 일반적인 문제는 이미지 생성 작업에 많이 사용되는 경우 에이전트가 이미지 생성과 이미지 변환/수정을 혼동하는 것입니다. *예를 들어,* ```py agent.run("Make an image of a house and a car", return_code=True) ``` 그러면 다음 결과가 출력됩니다: ```text ==Explanation from the agent== I will use the following tools `image_generator` to generate an image of a house and `image_transformer` to transform the image of a car into the image of a house. ==Code generated by the agent== house_image = image_generator(prompt="A house") car_image = image_generator(prompt="A car") house_car_image = image_transformer(image=car_image, prompt="A house") ``` 결과물이 우리가 여기서 원하는 것과 정확히 일치하지 않을 수 있습니다. 에이전트가 `image_generator`와 `image_transformer`의 차이점을 이해하기 어려워서 두 가지를 함께 사용하는 경우가 많은 것 같습니다. 여기서 `image_transformer`의 도구 이름과 설명을 변경하여 에이전트가 도울 수 있습니다. 
"image" 및 "prompt"와 약간 분리하기 위해 `modifier`라고 대신 부르겠습니다: ```py agent.toolbox["modifier"] = agent.toolbox.pop("image_transformer") agent.toolbox["modifier"].description = agent.toolbox["modifier"].description.replace( "transforms an image according to a prompt", "modifies an image" ) ``` 이제 "modify"은 새 이미지 프로세서를 사용하라는 강력한 신호이므로 위의 프롬프트에 도움이 될 것입니다. 다시 실행해 봅시다. ```py agent.run("Make an image of a house and a car", return_code=True) ``` 여기서 다음과 같은 결과를 얻게 됩니다: ```text ==Explanation from the agent== I will use the following tools: `image_generator` to generate an image of a house, then `image_generator` to generate an image of a car. ==Code generated by the agent== house_image = image_generator(prompt="A house") car_image = image_generator(prompt="A car") ``` 우리가 염두에 두었던 것과 확실히 더 가까워졌습니다! 하지만 집과 자동차가 모두 같은 이미지에 포함되면 좋겠습니다. 작업을 단일 이미지 생성에 더 집중하면 도움이 될 것입니다: ```py agent.run("Create image: 'A house and car'", return_code=True) ``` ```text ==Explanation from the agent== I will use the following tool: `image_generator` to generate an image. ==Code generated by the agent== image = image_generator(prompt="A house and car") ``` <Tip warning={true}> 에이전트는 여전히 특히 여러 개체의 이미지를 생성하는 것과 같이 약간 더 복잡한 사용 사례에서 취약한 경우가 많습니다. 앞으로 몇 달 안에 에이전트 자체와 기본 프롬프트가 더욱 개선되어 에이전트가 다양한 사용자 입력에 더욱 강력하게 대응할 수 있도록 할 예정입니다. </Tip> ### 전체 프롬프트 사용자 정의하기[[customizing-the-whole-prompt]] 사용자에게 최대한의 유연성을 제공하기 위해 [위](#structure-of-the-prompt)에 설명된 전체 프롬프트 템플릿을 사용자가 덮어쓸 수 있습니다. 이 경우 사용자 정의 프롬프트에 소개 섹션, 도구 섹션, 예제 섹션 및 미완성 예제 섹션이 포함되어 있는지 확인하세요. `run` 프롬프트 템플릿을 덮어쓰려면 다음과 같이 하면 됩니다: ```py template = """ [...] """ agent = HfAgent(your_endpoint, run_prompt_template=template) ``` <Tip warning={true}> 에이전트가 사용 가능한 도구를 인식하고 사용자의 프롬프트를 올바르게 삽입할 수 있도록 `<<all_tools>>` 문자열과 `<<prompt>>`를 `template` 어딘가에 정의해야 합니다. </Tip> 마찬가지로 `chat` 프롬프트 템플릿을 덮어쓸 수 있습니다. `chat` 모드에서는 항상 다음과 같은 교환 형식을 사용한다는 점에 유의하세요: ```text Human: <<task>> Assistant: ``` 따라서 사용자 정의 `chat` 프롬프트 템플릿의 예제에서도 이 형식을 사용하는 것이 중요합니다. 다음과 같이 인스턴스화 할 때 `chat` 템플릿을 덮어쓸 수 있습니다. ```python template = """ [...] """ agent = HfAgent(url_endpoint=your_endpoint, chat_prompt_template=template) ``` <Tip warning={true}> 에이전트가 사용 가능한 도구를 인식할 수 있도록 `<<all_tools>>` 문자열을 `template` 어딘가에 정의해야 합니다. </Tip> 두 경우 모두 커뮤니티의 누군가가 호스팅하는 템플릿을 사용하려는 경우 프롬프트 템플릿 대신 저장소 ID를 전달할 수 있습니다. 기본 프롬프트는 [이 저장소](https://huggingface.co/datasets/huggingface-tools/default-prompts)를 예로 들 수 있습니다. Hub의 저장소에 사용자 정의 프롬프트를 업로드하여 커뮤니티와 공유하려면 다음을 확인하세요: - 데이터 세트 저장소를 사용하세요. - `run` 명령에 대한 프롬프트 템플릿을 `run_prompt_template.txt`라는 파일에 넣으세요. - `chat` 명령에 대한 프롬프트 템플릿을 `chat_prompt_template.txt`라는 파일에 넣으세요. ## 사용자 정의 도구 사용하기[[using-custom-tools]] 이 섹션에서는 이미지 생성에 특화된 두 가지 기존 사용자 정의 도구를 활용하겠습니다: - 더 많은 이미지 수정을 허용하기 위해 [huggingface-tools/image-transformation](https://huggingface.co/spaces/huggingface-tools/image-transformation)을 [diffusers/controlnet-canny-tool](https://huggingface.co/spaces/diffusers/controlnet-canny-tool)로 대체합니다. - 기본 도구 상자에 이미지 업스케일링을 위한 새로운 도구가 추가되었습니다: [diffusers/latent-upscaler-tool](https://huggingface.co/spaces/diffusers/latent-upscaler-tool)가 기존 이미지 변환 도구를 대체합니다. 편리한 [`load_tool`] 함수를 사용하여 사용자 정의 도구를 가져오는 것으로 시작하겠습니다: ```py from transformers import load_tool controlnet_transformer = load_tool("diffusers/controlnet-canny-tool") upscaler = load_tool("diffusers/latent-upscaler-tool") ``` 에이전트에게 사용자 정의 도구를 추가하면 도구의 설명과 이름이 에이전트의 프롬프트에 자동으로 포함됩니다. 따라서 에이전트가 사용 방법을 이해할 수 있도록 사용자 정의 도구의 설명과 이름을 잘 작성해야 합니다. 
`controlnet_transformer`의 설명과 이름을 살펴보겠습니다: ```py print(f"Description: '{controlnet_transformer.description}'") print(f"Name: '{controlnet_transformer.name}'") ``` 그러면 다음 결과가 출력됩니다: ```text Description: 'This is a tool that transforms an image with ControlNet according to a prompt. It takes two inputs: `image`, which should be the image to transform, and `prompt`, which should be the prompt to use to change it. It returns the modified image.' Name: 'image_transformer' ``` 이름과 설명이 정확하고 [큐레이팅 된 도구 세트(curated set of tools)](./transformers_agents#a-curated-set-of-tools)의 스타일에 맞습니다. 다음으로, `controlnet_transformer`와 `upscaler`로 에이전트를 인스턴스화해 봅시다: ```py tools = [controlnet_transformer, upscaler] agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=tools) ``` 이 명령을 실행하면 다음 정보가 표시됩니다: ```text image_transformer has been replaced by <transformers_modules.diffusers.controlnet-canny-tool.bd76182c7777eba9612fc03c0 8718a60c0aa6312.image_transformation.ControlNetTransformationTool object at 0x7f1d3bfa3a00> as provided in `additional_tools` ``` 큐레이팅된 도구 세트에는 이미 'image_transformer' 도구가 있으며, 이 도구는 사용자 정의 도구로 대체됩니다. <Tip> 기존 도구와 똑같은 작업에 사용자 정의 도구를 사용하려는 경우 기존 도구를 덮어쓰는 것이 유용할 수 있습니다. 에이전트가 해당 작업에 능숙하기 때문입니다. 이 경우 사용자 정의 도구가 덮어쓴 도구와 정확히 동일한 API를 따라야 하며, 그렇지 않으면 해당 도구를 사용하는 모든 예제가 업데이트되도록 프롬프트 템플릿을 조정해야 한다는 점에 유의하세요. </Tip> 업스케일러 도구에 지정된 'image_upscaler'라는 이름 아직 기본 도구 상자에는 존재하지 않기 때문에, 도구 목록에 해당 이름이 간단히 추가되었습니다. 에이전트가 현재 사용할 수 있는 도구 상자는 언제든지 `agent.toolbox` 속성을 통해 확인할 수 있습니다: ```py print("\n".join([f"- {a}" for a in agent.toolbox.keys()])) ``` ```text - document_qa - image_captioner - image_qa - image_segmenter - transcriber - summarizer - text_classifier - text_qa - text_reader - translator - image_transformer - text_downloader - image_generator - video_generator - image_upscaler ``` 에이전트의 도구 상자에 `image_upscaler`가 추가된 점을 주목하세요. 이제 새로운 도구를 사용해봅시다! [Transformers Agents Quickstart](./transformers_agents#single-execution-run)에서 생성한 이미지를 다시 사용하겠습니다. ```py from diffusers.utils import load_image image = load_image( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" ) ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200> 이미지를 아름다운 겨울 풍경으로 바꿔 봅시다: ```py image = agent.run("Transform the image: 'A frozen lake and snowy forest'", image=image) ``` ```text ==Explanation from the agent== I will use the following tool: `image_transformer` to transform the image. ==Code generated by the agent== image = image_transformer(image, prompt="A frozen lake and snowy forest") ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_winter.png" width=200> 새로운 이미지 처리 도구는 이미지를 매우 강력하게 수정할 수 있는 ControlNet을 기반으로 합니다. 기본적으로 이미지 처리 도구는 512x512 픽셀 크기의 이미지를 반환합니다. 이를 업스케일링할 수 있는지 살펴봅시다. ```py image = agent.run("Upscale the image", image) ``` ```text ==Explanation from the agent== I will use the following tool: `image_upscaler` to upscale the image. ==Code generated by the agent== upscaled_image = image_upscaler(image) ``` <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_winter_upscale.png" width=400> 에이전트는 업스케일러 도구의 설명과 이름만 보고 방금 추가한 업스케일러 도구에 "이미지 업스케일링"이라는 프롬프트를 자동으로 매핑하여 올바르게 실행했습니다. 다음으로 새 사용자 정의 도구를 만드는 방법을 살펴보겠습니다. ### 새 도구 추가하기[[adding-new-tools]] 이 섹션에서는 에이전트에게 추가할 수 있는 새 도구를 만드는 방법을 보여 드립니다. 
#### 새 도구 만들기[[creating-a-new-tool]] 먼저 도구를 만드는 것부터 시작하겠습니다. 특정 작업에 대해 가장 많은 다운로드를 받은 Hugging Face Hub의 모델을 가져오는, 그다지 유용하지는 않지만 재미있는 작업을 추가하겠습니다. 다음 코드를 사용하면 됩니다: ```python from huggingface_hub import list_models task = "text-classification" model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) print(model.id) ``` `text-classification`(텍스트 분류) 작업의 경우 `'facebook/bart-large-mnli'`를 반환하고, `translation`(번역) 작업의 경우 `'google-t5/t5-base'`를 반환합니다. 이를 에이전트가 활용할 수 있는 도구로 변환하려면 어떻게 해야 할까요? 모든 도구는 필요한 주요 속성을 보유하는 슈퍼클래스 `Tool`에 의존합니다. 이를 상속하는 클래스를 만들어 보겠습니다: ```python from transformers import Tool class HFModelDownloadsTool(Tool): pass ``` 이 클래스에는 몇 가지 요구사항이 있습니다: - 도구 자체의 이름에 해당하는 `name` 속성. 수행명이 있는 다른 도구와 호환되도록 `model_download_counter`로 이름을 지정하겠습니다. - 에이전트의 프롬프트를 채우는 데 사용되는 속성 `description`. - `inputs` 및 `outputs` 속성. 이를 정의하면 Python 인터프리터가 유형에 대한 정보에 입각한 선택을 하는 데 도움이 되며, 도구를 허브에 푸시할 때 gradio 데모를 생성할 수 있습니다. 두 속성 모두 값은 '텍스트', '이미지' 또는 '오디오'가 될 수 있는 예상 값의 리스트입니다. - 추론 코드가 포함된 `__call__` 메소드. 이것이 우리가 위에서 다루었던 코드입니다! 이제 클래스의 모습은 다음과 같습니다: ```python from transformers import Tool from huggingface_hub import list_models class HFModelDownloadsTool(Tool): name = "model_download_counter" description = ( "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. " "It takes the name of the category (such as text-classification, depth-estimation, etc), and " "returns the name of the checkpoint." ) inputs = ["text"] outputs = ["text"] def __call__(self, task: str): model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) return model.id ``` 이제 도구를 손쉽게 사용할 수 있게 되었습니다. 도구를 파일에 저장하고 메인 스크립트에서 가져옵니다. 이 파일의 이름을 `model_downloads.py`로 지정하면 결과적으로 가져오기 코드는 다음과 같습니다: ```python from model_downloads import HFModelDownloadsTool tool = HFModelDownloadsTool() ``` 다른 사람들이 이 기능을 활용할 수 있도록 하고 초기화를 더 간단하게 하려면 네임스페이스 아래의 Hub로 푸시하는 것이 좋습니다. 그렇게 하려면 `tool` 변수에서 `push_to_hub`를 호출하면 됩니다: ```python tool.push_to_hub("hf-model-downloads") ``` 이제 허브에 코드가 생겼습니다! 마지막 단계인 에이전트가 코드를 사용하도록 하는 단계를 살펴보겠습니다. #### 에이전트가 도구를 사용하게 하기[[Having-the-agent-use-the-tool]] 이제 이런 식으로 허브에 존재하는 도구를 인스턴스화할 수 있습니다(도구의 사용자 이름은 변경하세요): We now have our tool that lives on the Hub which can be instantiated as such (change the user name for your tool): ```python from transformers import load_tool tool = load_tool("lysandre/hf-model-downloads") ``` 이 도구를 에이전트에서 사용하려면 에이전트 초기화 메소드의 `additional_tools` 매개변수에 전달하기만 하면 됩니다: ```python from transformers import HfAgent agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[tool]) agent.run( "Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?" ) ``` 그러면 다음과 같은 결과가 출력됩니다: ```text ==Code generated by the agent== model = model_download_counter(task="text-to-video") print(f"The model with the most downloads is {model}.") audio_model = text_reader(model) ==Result== The model with the most downloads is damo-vilab/text-to-video-ms-1.7b. ``` and generates the following audio. | **Audio** | |------------------------------------------------------------------------------------------------------------------------------------------------------| | <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/damo.wav" type="audio/wav"/> | <Tip> LLM에 따라 일부는 매우 취약하기 때문에 제대로 작동하려면 매우 정확한 프롬프트가 필요합니다. 에이전트가 도구를 잘 활용하기 위해서는 도구의 이름과 설명을 잘 정의하는 것이 무엇보다 중요합니다. 
</Tip> ### 기존 도구 대체하기[[replacing-existing-tools]] 에이전트의 도구 상자에 새 항목을 배정하기만 하면 기존 도구를 대체할 수 있습니다. 방법은 다음과 같습니다: ```python from transformers import HfAgent, load_tool agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") agent.toolbox["image-transformation"] = load_tool("diffusers/controlnet-canny-tool") ``` <Tip> 다른 도구로 교체할 때는 주의하세요! 이 작업으로 에이전트의 프롬프트도 조정됩니다. 작업에 더 적합한 프롬프트가 있으면 좋을 수 있지만, 다른 도구보다 더 많이 선택되거나 정의한 도구 대신 다른 도구가 선택될 수도 있습니다. </Tip> ## gradio-tools 사용하기[[leveraging-gradio-tools]] [gradio-tools](https://github.com/freddyaboulton/gradio-tools)는 Hugging Face Spaces를 도구로 사용할 수 있는 강력한 라이브러리입니다. 기존의 많은 Spaces뿐만 아니라 사용자 정의 Spaces를 사용하여 디자인할 수 있도록 지원합니다. 우리는 `Tool.from_gradio` 메소드를 사용하여 `gradio_tools`에 대한 지원을 제공합니다. 예를 들어, 프롬프트를 개선하고 더 나은 이미지를 생성하기 위해 `gradio-tools` 툴킷에서 제공되는 `StableDiffusionPromptGeneratorTool` 도구를 활용하고자 합니다. 먼저 `gradio_tools`에서 도구를 가져와서 인스턴스화합니다: ```python from gradio_tools import StableDiffusionPromptGeneratorTool gradio_tool = StableDiffusionPromptGeneratorTool() ``` 해당 인스턴스를 `Tool.from_gradio` 메소드에 전달합니다: ```python from transformers import Tool tool = Tool.from_gradio(gradio_tool) ``` 이제 일반적인 사용자 정의 도구와 똑같이 관리할 수 있습니다. 이를 활용하여 `a rabbit wearing a space suit'(우주복을 입은 토끼)라는 프롬프트를 개선했습니다: ```python from transformers import HfAgent agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder", additional_tools=[tool]) agent.run("Generate an image of the `prompt` after improving it.", prompt="A rabbit wearing a space suit") ``` 모델이 도구를 적절히 활용합니다: ```text ==Explanation from the agent== I will use the following tools: `StableDiffusionPromptGenerator` to improve the prompt, then `image_generator` to generate an image according to the improved prompt. ==Code generated by the agent== improved_prompt = StableDiffusionPromptGenerator(prompt) print(f"The improved prompt is {improved_prompt}.") image = image_generator(improved_prompt) ``` 마지막으로 이미지를 생성하기 전에: <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"> <Tip warning={true}> gradio-tools는 다른 모달리티로 작업할 때에도 *텍스트* 입력 및 출력을 필요로 합니다. 이 구현은 이미지 및 오디오 객체에서 작동합니다. 현재는 이 두 가지가 호환되지 않지만 지원 개선을 위해 노력하면서 빠르게 호환될 것입니다. </Tip> ## 향후 Langchain과의 호환성[[future-compatibility-with-langchain]] 저희는 Langchain을 좋아하며 매우 매력적인 도구 모음을 가지고 있다고 생각합니다. 이러한 도구를 처리하기 위해 Langchain은 다른 모달리티와 작업할 때에도 *텍스트* 입력과 출력을 필요로 합니다. 이는 종종 객체의 직렬화된(즉, 디스크에 저장된) 버전입니다. 이 차이로 인해 transformers-agents와 Langchain 간에는 멀티 모달리티가 처리되지 않습니다. 향후 버전에서 이 제한이 해결되기를 바라며, 이 호환성을 달성할 수 있도록 열렬한 Langchain 사용자의 도움을 환영합니다. 저희는 더 나은 지원을 제공하고자 합니다. 도움을 주고 싶으시다면, [이슈를 열어](https://github.com/huggingface/transformers/issues/new) 의견을 공유해 주세요.
transformers/docs/source/ko/custom_tools.md/0
{ "file_path": "transformers/docs/source/ko/custom_tools.md", "repo_id": "transformers", "token_count": 22824 }
265
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 패딩과 잘라내기[[padding-and-truncation]] 배치 입력은 길이가 다른 경우가 많아서 고정 크기 텐서로 변환할 수 없습니다. 패딩과 잘라내기는 다양한 길이의 배치에서 직사각형 텐서를 생성할 수 있도록 이 문제를 해결하는 전략입니다. 패딩은 특수한 **패딩 토큰**을 추가하여 짧은 시퀀스가 배치에서 가장 긴 시퀀스 또는 모델에서 허용하는 최대 길이와 동일한 길이를 갖도록 합니다. 잘라내기는 긴 시퀀스를 잘라내어 패딩과 다른 방식으로 시퀀스의 길이를 동일하게 합니다. 대부분의 경우 배치에 가장 긴 시퀀스의 길이로 패딩하고 모델이 허용할 수 있는 최대 길이로 잘라내는 것이 잘 작동합니다. 그러나 필요하다면 API가 지원하는 더 많은 전략을 사용할 수 있습니다. 필요한 인수는 `padding`, `truncation`, `max_length` 세 가지입니다. `padding` 인수는 패딩을 제어합니다. 불리언 또는 문자열일 수 있습니다: - `True` 또는 `'longest'`: 배치에서 가장 긴 시퀀스로 패딩합니다(단일 시퀀스만 제공하는 경우 패딩이 적용되지 않습니다). - `'max_length'`: `max_length` 인수가 지정한 길이로 패딩하거나, `max_length`가 제공되지 않은 경우(`max_length=None`) 모델에서 허용되는 최대 길이로 패딩합니다. 단일 시퀀스만 제공하는 경우에도 패딩이 적용됩니다. - `False` 또는 `'do_not_pad'`: 패딩이 적용되지 않습니다. 이것이 기본 동작입니다. `truncation` 인수는 잘라낼 방법을 정합니다. 불리언 또는 문자열일 수 있습니다: - `True` 또는 `longest_first`: `max_length` 인수가 지정한 최대 길이로 잘라내거나, `max_length`가 제공되지 않은 경우(`max_length=None`) 모델에서 허용되는 최대 길이로 잘라냅니다. 시퀀스 쌍에서 가장 긴 시퀀스의 토큰을 적절한 길이에 도달할 때까지 하나씩 제거합니다. - `'only_second'`: `max_length` 인수가 지정한 최대 길이로 잘라내거나, `max_length`가 제공되지 않은 경우(`max_length=None`) 모델에서 허용되는 최대 길이로 잘라냅니다. 시퀀스 쌍(또는 시퀀스 쌍의 배치)가 제공된 경우 쌍의 두 번째 문장만 잘라냅니다. - `'only_first'`: `max_length` 인수가 지정한 최대 길이로 잘라내거나, `max_length`가 제공되지 않은 경우(`max_length=None`) 모델에서 허용되는 최대 길이로 잘라냅니다. 시퀀스 쌍(또는 시퀀스 쌍의 배치)가 제공된 경우 쌍의 첫 번째 문장만 잘라냅니다. - `False` 또는 `'do_not_truncate'`: 잘라내기를 적용하지 않습니다. 이것이 기본 동작입니다. `max_length` 인수는 패딩 및 잘라내기를 적용할 길이를 제어합니다. 이 인수는 정수 또는 `None`일 수 있으며, `None`일 경우 모델이 허용할 수 있는 최대 길이로 기본값이 설정됩니다. 모델에 특정한 최대 입력 길이가 없는 경우 `max_length`에 대한 잘라내기 또는 패딩이 비활성화됩니다. 다음 표에는 패딩 및 잘라내기를 설정하는 권장 방법이 요약되어 있습니다. 입력으로 시퀀스 쌍을 사용하는 경우, 다음 예제에서 `truncation=True`를 `['only_first', 'only_second', 'longest_first']`에서 선택한 `STRATEGY`, 즉 `truncation='only_second'` 또는 `truncation='longest_first'`로 바꾸면 앞서 설명한 대로 쌍의 두 시퀀스가 잘리는 방식을 제어할 수 있습니다. 
| 잘라내기                             | 패딩                              | 사용 방법                                                                                  |
|--------------------------------------|-----------------------------------|--------------------------------------------------------------------------------------------|
| 잘라내기 없음                        | 패딩 없음                         | `tokenizer(batch_sentences)`                                                                 |
|                                      | 배치 내 최대 길이로 패딩          | `tokenizer(batch_sentences, padding=True)` 또는                                             |
|                                      |                                   | `tokenizer(batch_sentences, padding='longest')`                                             |
|                                      | 모델의 최대 입력 길이로 패딩      | `tokenizer(batch_sentences, padding='max_length')`                                          |
|                                      | 특정 길이로 패딩                  | `tokenizer(batch_sentences, padding='max_length', max_length=42)`                           |
|                                      | 다양한 길이로 패딩                | `tokenizer(batch_sentences, padding=True, pad_to_multiple_of=8)`                            |
| 모델의 최대 입력 길이로 잘라내기     | 패딩 없음                         | `tokenizer(batch_sentences, truncation=True)` 또는                                          |
|                                      |                                   | `tokenizer(batch_sentences, truncation=STRATEGY)`                                           |
|                                      | 배치 내 최대 길이로 패딩          | `tokenizer(batch_sentences, padding=True, truncation=True)` 또는                            |
|                                      |                                   | `tokenizer(batch_sentences, padding=True, truncation=STRATEGY)`                             |
|                                      | 모델의 최대 입력 길이로 패딩      | `tokenizer(batch_sentences, padding='max_length', truncation=True)` 또는                    |
|                                      |                                   | `tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY)`                     |
|                                      | 특정 길이로 패딩                  | 사용 불가                                                                                    |
| 특정 길이로 잘라내기                 | 패딩 없음                         | `tokenizer(batch_sentences, truncation=True, max_length=42)` 또는                           |
|                                      |                                   | `tokenizer(batch_sentences, truncation=STRATEGY, max_length=42)`                            |
|                                      | 배치 내 최대 길이로 패딩          | `tokenizer(batch_sentences, padding=True, truncation=True, max_length=42)` 또는             |
|                                      |                                   | `tokenizer(batch_sentences, padding=True, truncation=STRATEGY, max_length=42)`              |
|                                      | 모델의 최대 입력 길이로 패딩      | 사용 불가                                                                                    |
|                                      | 특정 길이로 패딩                  | `tokenizer(batch_sentences, padding='max_length', truncation=True, max_length=42)` 또는     |
|                                      |                                   | `tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY, max_length=42)`      |
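위 표의 전략이 실제로 어떻게 동작하는지 간단히 확인해 볼 수 있는 예시입니다. 아래 코드는 참고용 스케치이며, 여기서 사용한 `google-bert/bert-base-cased` 체크포인트와 `max_length=8` 값은 설명을 위해 임의로 가정한 것입니다. 실제 출력 길이는 사용하는 토크나이저에 따라 달라질 수 있습니다:

```py
from transformers import AutoTokenizer

# 예시용 토크나이저 (임의로 가정한 체크포인트)
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")

batch_sentences = [
    "But what about second breakfast?",
    "Don't think he knows about second breakfast, Pip.",
]

# 잘라내기 없음 + 배치 내 최대 길이로 패딩
padded = tokenizer(batch_sentences, padding=True)
print([len(ids) for ids in padded["input_ids"]])  # 두 시퀀스의 길이가 동일해집니다

# 특정 길이(max_length=8)로 잘라내고 같은 길이로 패딩
fixed = tokenizer(batch_sentences, padding="max_length", truncation=True, max_length=8)
print([len(ids) for ids in fixed["input_ids"]])  # 모든 시퀀스가 길이 8이 됩니다
```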
transformers/docs/source/ko/pad_truncation.md/0
{ "file_path": "transformers/docs/source/ko/pad_truncation.md", "repo_id": "transformers", "token_count": 5964 }
266
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 전처리[[preprocess]] [[open-in-colab]] 모델을 훈련하려면 데이터 세트를 모델에 맞는 입력 형식으로 전처리해야 합니다. 텍스트, 이미지 또는 오디오인지 관계없이 데이터를 텐서 배치로 변환하고 조립할 필요가 있습니다. 🤗 Transformers는 모델에 대한 데이터를 준비하는 데 도움이 되는 일련의 전처리 클래스를 제공합니다. 이 튜토리얼에서는 다음 내용을 배울 수 있습니다: * 텍스트는 [Tokenizer](./main_classes/tokenizer)를 사용하여 토큰 시퀀스로 변환하고 토큰의 숫자 표현을 만든 후 텐서로 조립합니다. * 음성 및 오디오는 [Feature extractor](./main_classes/feature_extractor)를 사용하여 오디오 파형에서 시퀀스 특성을 파악하여 텐서로 변환합니다. * 이미지 입력은 [ImageProcessor](./main_classes/image)을 사용하여 이미지를 텐서로 변환합니다. * 멀티모달 입력은 [Processor](./main_classes/processors)을 사용하여 토크나이저와 특성 추출기 또는 이미지 프로세서를 결합합니다. <Tip> `AutoProcessor`는 **언제나** 작동하여 토크나이저, 이미지 프로세서, 특성 추출기 또는 프로세서 등 사용 중인 모델에 맞는 클래스를 자동으로 선택합니다. </Tip> 시작하기 전에 🤗 Datasets를 설치하여 실험에 사용할 데이터를 불러올 수 있습니다: ```bash pip install datasets ``` ## 자연어처리[[natural-language-processing]] <Youtube id="Yffk5aydLzg"/> 텍스트 데이터를 전처리하기 위한 기본 도구는 [tokenizer](main_classes/tokenizer)입니다. 토크나이저는 일련의 규칙에 따라 텍스트를 *토큰*으로 나눕니다. 토큰은 숫자로 변환되고 텐서는 모델 입력이 됩니다. 모델에 필요한 추가 입력은 토크나이저에 의해 추가됩니다. <Tip> 사전훈련된 모델을 사용할 계획이라면 모델과 함께 사전훈련된 토크나이저를 사용하는 것이 중요합니다. 이렇게 하면 텍스트가 사전훈련 말뭉치와 동일한 방식으로 분할되고 사전훈련 중에 동일한 해당 토큰-인덱스 쌍(일반적으로 *vocab*이라고 함)을 사용합니다. </Tip> 시작하려면 [`AutoTokenizer.from_pretrained`] 메소드를 사용하여 사전훈련된 토크나이저를 불러오세요. 모델과 함께 사전훈련된 *vocab*을 다운로드합니다: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased") ``` 그 다음으로 텍스트를 토크나이저에 넣어주세요: ```py >>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") >>> print(encoded_input) {'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` 토크나이저는 세 가지 중요한 항목을 포함한 딕셔너리를 반환합니다: * [input_ids](glossary#input-ids)는 문장의 각 토큰에 해당하는 인덱스입니다. * [attention_mask](glossary#attention-mask)는 토큰을 처리해야 하는지 여부를 나타냅니다. * [token_type_ids](glossary#token-type-ids)는 두 개 이상의 시퀀스가 있을 때 토큰이 속한 시퀀스를 식별합니다. `input_ids`를 디코딩하여 입력을 반환합니다: ```py >>> tokenizer.decode(encoded_input["input_ids"]) '[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]' ``` 토크나이저가 두 개의 특수한 토큰(분류 토큰 `CLS`와 분할 토큰 `SEP`)을 문장에 추가했습니다. 모든 모델에 특수한 토큰이 필요한 것은 아니지만, 필요하다면 토크나이저가 자동으로 추가합니다. 전처리할 문장이 여러 개 있는 경우에는 리스트로 토크나이저에 전달합니다: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_inputs = tokenizer(batch_sentences) >>> print(encoded_inputs) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]} ``` ### 패딩[[pad]] 모델 입력인 텐서는 모양이 균일해야 하지만, 문장의 길이가 항상 같지는 않기 때문에 문제가 될 수 있습니다. 패딩은 짧은 문장에 특수한 *패딩 토큰*을 추가하여 텐서를 직사각형 모양이 되도록 하는 전략입니다. `padding` 매개변수를 `True`로 설정하여 배치 내의 짧은 시퀀스를 가장 긴 시퀀스에 맞춰 패딩합니다. ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` 길이가 짧은 첫 문장과 세 번째 문장이 이제 `0`으로 채워졌습니다. ### 잘라내기[[truncation]] 한편, 때로는 시퀀스가 모델에서 처리하기에 너무 길 수도 있습니다. 이 경우, 시퀀스를 더 짧게 줄일 필요가 있습니다. 모델에서 허용하는 최대 길이로 시퀀스를 자르려면 `truncation` 매개변수를 `True`로 설정하세요: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` <Tip> 다양한 패딩과 잘라내기 인수에 대해 더 알아보려면 [패딩과 잘라내기](./pad_truncation) 개념 가이드를 확인해보세요. </Tip> ### 텐서 만들기[[build-tensors]] 마지막으로, 토크나이저가 모델에 공급되는 실제 텐서를 반환하도록 합니다. `return_tensors` 매개변수를 PyTorch의 경우 `pt`, TensorFlow의 경우 `tf`로 설정하세요: <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... 
] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> ## 오디오[[audio]] 오디오 작업은 모델에 맞는 데이터 세트를 준비하기 위해 [특성 추출기](main_classes/feature_extractor)가 필요합니다. 특성 추출기는 원시 오디오 데이터에서 특성를 추출하고 이를 텐서로 변환하는 것이 목적입니다. 오디오 데이터 세트에 특성 추출기를 사용하는 방법을 보기 위해 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터 세트를 가져오세요. (데이터 세트를 가져오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)에서 자세히 설명하고 있습니다.) ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` `audio` 열의 첫 번째 요소에 접근하여 입력을 살펴보세요. `audio` 열을 호출하면 오디오 파일을 자동으로 가져오고 리샘플링합니다. ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` 이렇게 하면 세 가지 항목이 반환됩니다: * `array`는 1D 배열로 가져와서 (필요한 경우) 리샘플링된 음성 신호입니다. * `path`는 오디오 파일의 위치를 가리킵니다. * `sampling_rate`는 음성 신호에서 초당 측정되는 데이터 포인트 수를 나타냅니다. 이 튜토리얼에서는 [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) 모델을 사용합니다. 모델 카드를 보면 Wav2Vec2가 16kHz 샘플링된 음성 오디오를 기반으로 사전훈련된 것을 알 수 있습니다. 모델을 사전훈련하는 데 사용된 데이터 세트의 샘플링 레이트와 오디오 데이터의 샘플링 레이트가 일치해야 합니다. 데이터의 샘플링 레이트가 다르면 데이터를 리샘플링해야 합니다. 1. 🤗 Datasets의 [`~datasets.Dataset.cast_column`] 메소드를 사용하여 샘플링 레이트를 16kHz로 업샘플링하세요: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` 2. 
오디오 파일을 리샘플링하기 위해 `audio` 열을 다시 호출합니다: ```py >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` 다음으로, 입력을 정규화하고 패딩할 특성 추출기를 가져오세요. 텍스트 데이터의 경우, 더 짧은 시퀀스에 대해 `0`이 추가됩니다. 오디오 데이터에도 같은 개념이 적용됩니다. 특성 추출기는 배열에 `0`(묵음으로 해석)을 추가합니다. [`AutoFeatureExtractor.from_pretrained`]를 사용하여 특성 추출기를 가져오세요: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` 오디오 `array`를 특성 추출기에 전달하세요. 또한, 발생할 수 있는 조용한 오류(silent errors)를 더 잘 디버깅할 수 있도록 특성 추출기에 `sampling_rate` 인수를 추가하는 것을 권장합니다. ```py >>> audio_input = [dataset[0]["audio"]["array"]] >>> feature_extractor(audio_input, sampling_rate=16000) {'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` 토크나이저와 마찬가지로 배치 내에서 가변적인 시퀀스를 처리하기 위해 패딩 또는 잘라내기를 적용할 수 있습니다. 이 두 개의 오디오 샘플의 시퀀스 길이를 확인해보세요: ```py >>> dataset[0]["audio"]["array"].shape (173398,) >>> dataset[1]["audio"]["array"].shape (106496,) ``` 오디오 샘플의 길이가 동일하도록 데이터 세트를 전처리하는 함수를 만드세요. 최대 샘플 길이를 지정하면 특성 추출기가 해당 길이에 맞춰 시퀀스를 패딩하거나 잘라냅니다: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs ``` `preprocess_function`을 데이터 세트의 처음 예시 몇 개에 적용해보세요: ```py >>> processed_dataset = preprocess_function(dataset[:5]) ``` 이제 샘플 길이가 모두 같고 지정된 최대 길이에 맞게 되었습니다. 드디어 전처리된 데이터 세트를 모델에 전달할 수 있습니다! ```py >>> processed_dataset["input_values"][0].shape (100000,) >>> processed_dataset["input_values"][1].shape (100000,) ``` ## 컴퓨터 비전[[computer-vision]] 컴퓨터 비전 작업의 경우, 모델에 대한 데이터 세트를 준비하기 위해 [이미지 프로세서](main_classes/image_processor)가 필요합니다. 이미지 전처리는 이미지를 모델이 예상하는 입력으로 변환하는 여러 단계로 이루어집니다. 이러한 단계에는 크기 조정, 정규화, 색상 채널 보정, 이미지의 텐서 변환 등이 포함됩니다. <Tip> 이미지 전처리는 이미지 증강 기법을 몇 가지 적용한 뒤에 할 수도 있습니다. 이미지 전처리 및 이미지 증강은 모두 이미지 데이터를 변형하지만, 서로 다른 목적을 가지고 있습니다: * 이미지 증강은 과적합(over-fitting)을 방지하고 모델의 견고함(resiliency)을 높이는 데 도움이 되는 방식으로 이미지를 수정합니다. 밝기와 색상 조정, 자르기, 회전, 크기 조정, 확대/축소 등 다양한 방법으로 데이터를 증강할 수 있습니다. 그러나 증강으로 이미지의 의미가 바뀌지 않도록 주의해야 합니다. * 이미지 전처리는 이미지가 모델이 예상하는 입력 형식과 일치하도록 보장합니다. 컴퓨터 비전 모델을 미세 조정할 때 이미지는 모델이 초기에 훈련될 때와 정확히 같은 방식으로 전처리되어야 합니다. 이미지 증강에는 원하는 라이브러리를 무엇이든 사용할 수 있습니다. 이미지 전처리에는 모델과 연결된 `ImageProcessor`를 사용합니다. </Tip> [food101](https://huggingface.co/datasets/food101) 데이터 세트를 가져와서 컴퓨터 비전 데이터 세트에서 이미지 프로세서를 어떻게 사용하는지 알아보세요. 데이터 세트를 불러오는 방법은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)을 참고하세요. <Tip> 데이터 세트가 상당히 크기 때문에 🤗 Datasets의 `split` 매개변수를 사용하여 훈련 세트에서 작은 샘플만 가져오세요! 
</Tip> ```py >>> from datasets import load_dataset >>> dataset = load_dataset("food101", split="train[:100]") ``` 다음으로, 🤗 Datasets의 [`image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image)로 이미지를 확인해보세요: ```py >>> dataset[0]["image"] ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png"/> </div> [`AutoImageProcessor.from_pretrained`]로 이미지 프로세서를 가져오세요: ```py >>> from transformers import AutoImageProcessor >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224") ``` 먼저 이미지 증강 단계를 추가해 봅시다. 아무 라이브러리나 사용해도 괜찮지만, 이번 튜토리얼에서는 torchvision의 [`transforms`](https://pytorch.org/vision/stable/transforms.html) 모듈을 사용하겠습니다. 다른 데이터 증강 라이브러리를 사용해보고 싶다면, [Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) 또는 [Kornia notebooks](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb)에서 어떻게 사용하는지 배울 수 있습니다. 1. [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html)로 [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html)와 [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) 등 변환을 몇 가지 연결하세요. 참고로 크기 조정에 필요한 이미지의 크기 요구사항은 `image_processor`에서 가져올 수 있습니다. 일부 모델은 정확한 높이와 너비를 요구하지만, 제일 짧은 변의 길이(`shortest_edge`)만 정의된 모델도 있습니다. ```py >>> from torchvision.transforms import RandomResizedCrop, ColorJitter, Compose >>> size = ( ... image_processor.size["shortest_edge"] ... if "shortest_edge" in image_processor.size ... else (image_processor.size["height"], image_processor.size["width"]) ... ) >>> _transforms = Compose([RandomResizedCrop(size), ColorJitter(brightness=0.5, hue=0.5)]) ``` 2. 모델은 입력으로 [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values)를 받습니다. `ImageProcessor`는 이미지 정규화 및 적절한 텐서 생성을 처리할 수 있습니다. 배치 이미지에 대한 이미지 증강 및 이미지 전처리를 결합하고 `pixel_values`를 생성하는 함수를 만듭니다: ```py >>> def transforms(examples): ... images = [_transforms(img.convert("RGB")) for img in examples["image"]] ... examples["pixel_values"] = image_processor(images, do_resize=False, return_tensors="pt")["pixel_values"] ... return examples ``` <Tip> 위의 예에서는 이미지 증강 중에 이미지 크기를 조정했기 때문에 `do_resize=False`로 설정하고, 해당 `image_processor`에서 `size` 속성을 활용했습니다. 이미지 증강 중에 이미지 크기를 조정하지 않은 경우 이 매개변수를 생략하세요. 기본적으로는 `ImageProcessor`가 크기 조정을 처리합니다. 증강 변환 과정에서 이미지를 정규화하려면 `image_processor.image_mean` 및 `image_processor.image_std` 값을 사용하세요. </Tip> 3. 🤗 Datasets의 [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform)를 사용하여 실시간으로 변환을 적용합니다: ```py >>> dataset.set_transform(transforms) ``` 4. 이제 이미지에 접근하면 이미지 프로세서가 `pixel_values`를 추가한 것을 알 수 있습니다. 드디어 처리된 데이터 세트를 모델에 전달할 수 있습니다! ```py >>> dataset[0].keys() ``` 다음은 변형이 적용된 후의 이미지입니다. 이미지가 무작위로 잘려나갔고 색상 속성이 다릅니다. ```py >>> import numpy as np >>> import matplotlib.pyplot as plt >>> img = dataset[0]["pixel_values"] >>> plt.imshow(img.permute(1, 2, 0)) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png"/> </div> <Tip> `ImageProcessor`는 객체 감지, 시맨틱 세그멘테이션(semantic segmentation), 인스턴스 세그멘테이션(instance segmentation), 파놉틱 세그멘테이션(panoptic segmentation)과 같은 작업에 대한 후처리 방법을 제공합니다. 
이러한 방법은 모델의 원시 출력을 경계 상자나 세그멘테이션 맵과 같은 의미 있는 예측으로 변환해줍니다. </Tip> ### 패딩[[pad]] 예를 들어, [DETR](./model_doc/detr)와 같은 경우에는 모델이 훈련할 때 크기 조정 증강을 적용합니다. 이로 인해 배치 내 이미지 크기가 달라질 수 있습니다. [`DetrImageProcessor`]의 [`DetrImageProcessor.pad`]를 사용하고 사용자 정의 `collate_fn`을 정의해서 배치 이미지를 처리할 수 있습니다. ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... return batch ``` ## 멀티모달[[multimodal]] 멀티모달 입력이 필요한 작업의 경우, 모델에 데이터 세트를 준비하기 위한 [프로세서](main_classes/processors)가 필요합니다. 프로세서는 토크나이저와 특성 추출기와 같은 두 가지 처리 객체를 결합합니다. [LJ Speech](https://huggingface.co/datasets/lj_speech) 데이터 세트를 가져와서 자동 음성 인식(ASR)을 위한 프로세서를 사용하는 방법을 확인하세요. (데이터 세트를 가져오는 방법에 대한 자세한 내용은 🤗 [데이터 세트 튜토리얼](https://huggingface.co/docs/datasets/load_hub)에서 볼 수 있습니다.) ```py >>> from datasets import load_dataset >>> lj_speech = load_dataset("lj_speech", split="train") ``` 자동 음성 인식(ASR)에서는 `audio`와 `text`에만 집중하면 되므로, 다른 열들은 제거할 수 있습니다: ```py >>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) ``` 이제 `audio`와 `text`열을 살펴보세요: ```py >>> lj_speech[0]["audio"] {'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ..., 7.3242188e-04, 2.1362305e-04, 6.1035156e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav', 'sampling_rate': 22050} >>> lj_speech[0]["text"] 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition' ``` 기존에 사전훈련된 모델에서 사용된 데이터 세트와 새로운 오디오 데이터 세트의 샘플링 레이트를 일치시키기 위해 오디오 데이터 세트의 샘플링 레이트를 [리샘플링](preprocessing#audio)해야 합니다! ```py >>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) ``` [`AutoProcessor.from_pretrained`]로 프로세서를 가져오세요: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") ``` 1. `array`에 들어 있는 오디오 데이터를 `input_values`로 변환하고 `text`를 토큰화하여 `labels`로 변환하는 함수를 만듭니다. 모델의 입력은 다음과 같습니다: ```py >>> def prepare_dataset(example): ... audio = example["audio"] ... example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000)) ... return example ``` 2. 샘플을 `prepare_dataset` 함수에 적용하세요: ```py >>> prepare_dataset(lj_speech[0]) ``` 이제 프로세서가 `input_values`와 `labels`를 추가하고, 샘플링 레이트도 올바르게 16kHz로 다운샘플링했습니다. 드디어 처리된 데이터 세트를 모델에 전달할 수 있습니다!
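단일 샘플이 아니라 전체 데이터 세트에 전처리를 적용하려면, 앞의 오디오 예시처럼 🤗 Datasets의 [`~datasets.Dataset.map`]을 사용할 수 있습니다. 아래는 이를 가정한 간단한 스케치이며, 제거할 열이나 배치 처리 여부 등은 실제 학습 파이프라인에 맞게 조정해야 합니다:

```py
>>> processed_dataset = lj_speech.map(prepare_dataset, remove_columns=["audio", "text"])
>>> processed_dataset[0].keys()  # 'input_values'와 'labels'가 포함됩니다
```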
transformers/docs/source/ko/preprocessing.md/0
{ "file_path": "transformers/docs/source/ko/preprocessing.md", "repo_id": "transformers", "token_count": 17108 }
267
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Tour rápido [[open-in-colab]] Comece a trabalhar com 🤗 Transformers! Comece usando [`pipeline`] para rápida inferência e facilmente carregue um modelo pré-treinado e um tokenizer com [AutoClass](./model_doc/auto) para resolver tarefas de texto, visão ou áudio. <Tip> Todos os exemplos de código apresentados na documentação têm um botão no canto superior direito para escolher se você deseja ocultar ou mostrar o código no Pytorch ou no TensorFlow. Caso contrário, é esperado que funcione para ambos back-ends sem nenhuma alteração. </Tip> ## Pipeline [`pipeline`] é a maneira mais fácil de usar um modelo pré-treinado para uma dada tarefa. <Youtube id="tiZFewofSLM"/> A [`pipeline`] apoia diversas tarefas fora da caixa: **Texto**: * Análise sentimental: classifica a polaridade de um texto. * Geração de texto (em Inglês): gera texto a partir de uma entrada. * Reconhecimento de entidade mencionada: legenda cada palavra com uma classe que a representa (pessoa, data, local, etc...) * Respostas: extrai uma resposta dado algum contexto e uma questão * Máscara de preenchimento: preenche o espaço, dado um texto com máscaras de palavras. * Sumarização: gera o resumo de um texto longo ou documento. * Tradução: traduz texto para outra língua. * Extração de características: cria um tensor que representa o texto. **Imagem**: * Classificação de imagens: classifica uma imagem. * Segmentação de imagem: classifica cada pixel da imagem. * Detecção de objetos: detecta objetos em uma imagem. **Audio**: * Classficação de áudio: legenda um trecho de áudio fornecido. * Reconhecimento de fala automático: transcreve audio em texto. <Tip> Para mais detalhes sobre a [`pipeline`] e tarefas associadas, siga a documentação [aqui](./main_classes/pipelines). </Tip> ### Uso da pipeline No exemplo a seguir, você usará [`pipeline`] para análise sentimental. Instale as seguintes dependências se você ainda não o fez: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> Importe [`pipeline`] e especifique a tarefa que deseja completar: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` A pipeline baixa and armazena um [modelo pré-treinado](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) padrão e tokenizer para análise sentimental. 
Agora você pode usar `classifier` no texto alvo: ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` Para mais de uma sentença, passe uma lista para a [`pipeline`], a qual retornará uma lista de dicionários: ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) >>> for result in results: ... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` A [`pipeline`] também pode iterar sobre um Dataset inteiro. Comece instalando a biblioteca de [🤗 Datasets](https://huggingface.co/docs/datasets/): ```bash pip install datasets ``` Crie uma [`pipeline`] com a tarefa que deseja resolver e o modelo que deseja usar. ```py >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` A seguir, carregue uma base de dados (confira a 🤗 [Iniciação em Datasets](https://huggingface.co/docs/datasets/quickstart) para mais detalhes) que você gostaria de iterar sobre. Por exemplo, vamos carregar o dataset [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14): ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` Precisamos garantir que a taxa de amostragem do conjunto de dados corresponda à taxa de amostragem em que o facebook/wav2vec2-base-960h foi treinado. ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` Os arquivos de áudio são carregados e re-amostrados automaticamente ao chamar a coluna `"audio"`. Vamos extrair as arrays de formas de onda originais das primeiras 4 amostras e passá-las como uma lista para o pipeline: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I TURN A JOIN A COUNT'] ``` Para um conjunto de dados maior onde as entradas são maiores (como em fala ou visão), será necessário passar um gerador em vez de uma lista que carregue todas as entradas na memória. Consulte a [documentação do pipeline](./main_classes/pipelines) para mais informações. ### Use outro modelo e tokenizer na pipeline A [`pipeline`] pode acomodar qualquer modelo do [Model Hub](https://huggingface.co/models), facilitando sua adaptação para outros casos de uso. Por exemplo, se você quiser um modelo capaz de lidar com texto em francês, use as tags no Model Hub para filtrar um modelo apropriado. O principal resultado filtrado retorna um [modelo BERT](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) bilíngue ajustado para análise de sentimentos. Ótimo, vamos usar este modelo! 
```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> Use o [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] para carregar o modelo pré-treinado e seu tokenizer associado (mais em `AutoClass` abaixo): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> Use o [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] para carregar o modelo pré-treinado e o tokenizer associado (mais em `TFAutoClass` abaixo): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> Então você pode especificar o modelo e o tokenizador na [`pipeline`] e aplicar o `classifier` no seu texto alvo: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` Se você não conseguir achar um modelo para o seu caso de uso, precisará usar fine-tune em um modelo pré-treinado nos seus dados. Veja nosso [tutorial de fine-tuning](./training) para descobrir como. Finalmente, depois que você tiver usado esse processo em seu modelo, considere compartilhá-lo conosco (veja o tutorial [aqui](./model_sharing)) na plataforma Model Hub afim de democratizar NLP! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> Por baixo dos panos, as classes [`AutoModelForSequenceClassification`] e [`AutoTokenizer`] trabalham juntas para fortificar o [`pipeline`]. Um [AutoClass](./model_doc/auto) é um atalho que automaticamente recupera a arquitetura de um modelo pré-treinado a partir de seu nome ou caminho. Basta selecionar a `AutoClass` apropriada para sua tarefa e seu tokenizer associado com [`AutoTokenizer`]. Vamos voltar ao nosso exemplo e ver como você pode usar a `AutoClass` para replicar os resultados do [`pipeline`]. ### AutoTokenizer Um tokenizer é responsável por pré-processar o texto em um formato que seja compreensível para o modelo. Primeiro, o tokenizer dividirá o texto em palavras chamadas *tokens*. Existem várias regras que regem o processo de tokenização, incluindo como dividir uma palavra e em que nível (saiba mais sobre tokenização [aqui](./tokenizer_summary)). A coisa mais importante a lembrar, porém, é que você precisa instanciar o tokenizer com o mesmo nome do modelo para garantir que está usando as mesmas regras de tokenização com as quais um modelo foi pré-treinado. Carregue um tokenizer com [`AutoTokenizer`]: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Em seguida, o tokenizer converte os tokens em números para construir um tensor como entrada para o modelo. Isso é conhecido como o *vocabulário* do modelo. 
Passe o texto para o tokenizer: ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` O tokenizer retornará um dicionário contendo: * [input_ids](./glossary#input-ids): representações numéricas de seus tokens. * [atttention_mask](.glossary#attention-mask): indica quais tokens devem ser atendidos. Assim como o [`pipeline`], o tokenizer aceitará uma lista de entradas. Além disso, o tokenizer também pode preencher e truncar o texto para retornar um lote com comprimento uniforme: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... ) ``` </tf> </frameworkcontent> Leia o tutorial de [pré-processamento](./pré-processamento) para obter mais detalhes sobre tokenização. ### AutoModel <frameworkcontent> <pt> 🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um [`AutoModel`] como carregaria um [`AutoTokenizer`]. A única diferença é selecionar o [`AutoModel`] correto para a tarefa. Como você está fazendo classificação de texto ou sequência, carregue [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Veja o [sumário de tarefas](./task_summary) para qual classe de [`AutoModel`] usar para cada tarefa. </Tip> Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo. Você apenas tem que descompactar o dicionário usando `**`: ```py >>> pt_outputs = pt_model(**pt_batch) ``` O modelo gera as ativações finais no atributo `logits`. Aplique a função softmax aos `logits` para recuperar as probabilidades: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers fornecem uma maneira simples e unificada de carregar instâncias pré-treinadas. Isso significa que você pode carregar um [`TFAutoModel`] como carregaria um [`AutoTokenizer`]. A única diferença é selecionar o [`TFAutoModel`] correto para a tarefa. Como você está fazendo classificação de texto ou sequência, carregue [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> Veja o [sumário de tarefas](./task_summary) para qual classe de [`AutoModel`] usar para cada tarefa. 
</Tip> Agora você pode passar seu grupo de entradas pré-processadas diretamente para o modelo através da passagem de chaves de dicionários ao tensor. ```py >>> tf_outputs = tf_model(tf_batch) ``` O modelo gera as ativações finais no atributo `logits`. Aplique a função softmax aos `logits` para recuperar as probabilidades: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> Todos os modelos de 🤗 Transformers (PyTorch ou TensorFlow) geram tensores *antes* da função de ativação final (como softmax) pois essa função algumas vezes é fundida com a perda. </Tip> Os modelos são um standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) ou um [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) para que você possa usá-los em seu loop de treinamento habitual. No entanto, para facilitar as coisas, 🤗 Transformers fornece uma classe [`Trainer`] para PyTorch que adiciona funcionalidade para treinamento distribuído, precisão mista e muito mais. Para o TensorFlow, você pode usar o método `fit` de [Keras](https://keras.io/). Consulte o [tutorial de treinamento](./training) para obter mais detalhes. <Tip> As saídas do modelo 🤗 Transformers são classes de dados especiais para que seus atributos sejam preenchidos automaticamente em um IDE. As saídas do modelo também se comportam como uma tupla ou um dicionário (por exemplo, você pode indexar com um inteiro, uma parte ou uma string), caso em que os atributos `None` são ignorados. </Tip> ### Salvar um modelo <frameworkcontent> <pt> Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando [`PreTrainedModel.save_pretrained`]: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` Quando você estiver pronto para usá-lo novamente, recarregue com [`PreTrainedModel.from_pretrained`]: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> Uma vez que seu modelo estiver afinado, você pode salvá-lo com seu Tokenizer usando [`TFPreTrainedModel.save_pretrained`]: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` Quando você estiver pronto para usá-lo novamente, recarregue com [`TFPreTrainedModel.from_pretrained`] ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> Um recurso particularmente interessante dos 🤗 Transformers é a capacidade de salvar um modelo e recarregá-lo como um modelo PyTorch ou TensorFlow. Use `from_pt` ou `from_tf` para converter o modelo de um framework para outro: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </tf> </frameworkcontent>
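Como verificação rápida, o modelo e o tokenizer salvos também podem ser recarregados diretamente em uma [`pipeline`]. O trecho abaixo é apenas um esboço ilustrativo e assume que o diretório `./pt_save_pretrained` criado acima existe e contém o modelo e o tokenizer salvos:

```py
>>> from transformers import pipeline

>>> # carrega modelo e tokenizer a partir do diretório salvo anteriormente
>>> classifier = pipeline("sentiment-analysis", model="./pt_save_pretrained")
>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.")
```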
transformers/docs/source/pt/quicktour.md/0
{ "file_path": "transformers/docs/source/pt/quicktour.md", "repo_id": "transformers", "token_count": 6110 }
268
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 聊天模型的模板 ## 介绍 LLM 的一个常见应用场景是聊天。在聊天上下文中,不再是连续的文本字符串构成的语句(不同于标准的语言模型), 聊天模型由一条或多条消息组成的对话组成,每条消息都有一个“用户”或“助手”等 **角色**,还包括消息文本。 与`Tokenizer`类似,不同的模型对聊天的输入格式要求也不同。这就是我们添加**聊天模板**作为一个功能的原因。 聊天模板是`Tokenizer`的一部分。用来把问答的对话内容转换为模型的输入`prompt`。 让我们通过一个快速的示例来具体说明,使用`BlenderBot`模型。 BlenderBot有一个非常简单的默认模板,主要是在对话轮之间添加空格: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) " Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>" ``` 注意,整个聊天对话内容被压缩成了一整个字符串。如果我们使用默认设置的`tokenize=True`,那么该字符串也将被tokenized处理。 不过,为了看到更复杂的模板实际运行,让我们使用`mistralai/Mistral-7B-Instruct-v0.1`模型。 ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1") >>> chat = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "I'd like to show off how chat templating works!"}, ... ] >>> tokenizer.apply_chat_template(chat, tokenize=False) "<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]" ``` 可以看到,这一次tokenizer已经添加了[INST]和[/INST]来表示用户消息的开始和结束。 Mistral-instruct是有使用这些token进行训练的,但BlenderBot没有。 ## 我如何使用聊天模板? 
正如您在上面的示例中所看到的,聊天模板非常容易使用。只需构建一系列带有`role`和`content`键的消息, 然后将其传递给[`~PreTrainedTokenizer.apply_chat_template`]方法。 另外,在将聊天模板用作模型预测的输入时,还建议使用`add_generation_prompt=True`来添加[generation prompt](#什么是generation-prompts)。 这是一个准备`model.generate()`的示例,使用`Zephyr`模型: ```python from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "HuggingFaceH4/zephyr-7b-beta" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint) # You may want to use bfloat16 and/or move to GPU here messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt") print(tokenizer.decode(tokenized_chat[0])) ``` 这将生成Zephyr期望的输入格式的字符串。它看起来像这样: ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> ``` 现在我们已经按照`Zephyr`的要求传入prompt了,我们可以使用模型来生成对用户问题的回复: ```python outputs = model.generate(tokenized_chat, max_new_tokens=128) print(tokenizer.decode(outputs[0])) ``` 输出结果是: ```text <|system|> You are a friendly chatbot who always responds in the style of a pirate</s> <|user|> How many helicopters can a human eat in one sitting?</s> <|assistant|> Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. ``` 啊,原来这么容易! ## 有自动化的聊天`pipeline`吗? 有的,[`ConversationalPipeline`]。这个`pipeline`的设计是为了方便使用聊天模型。让我们再试一次 Zephyr 的例子,但这次使用`pipeline`: ```python from transformers import pipeline pipe = pipeline("conversational", "HuggingFaceH4/zephyr-7b-beta") messages = [ { "role": "system", "content": "You are a friendly chatbot who always responds in the style of a pirate", }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] print(pipe(messages)) ``` ```text Conversation id: 76d886a0-74bd-454e-9804-0467041a63dc system: You are a friendly chatbot who always responds in the style of a pirate user: How many helicopters can a human eat in one sitting? assistant: Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all. ``` [`ConversationalPipeline`]将负责处理所有的`tokenized`并调用`apply_chat_template`,一旦模型有了聊天模板,您只需要初始化pipeline并传递消息列表! ## 什么是"generation prompts"? 
您可能已经注意到`apply_chat_template`方法有一个`add_generation_prompt`参数。 这个参数告诉模板添加模型开始答复的标记。例如,考虑以下对话: ```python messages = [ {"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": "Nice to meet you!"}, {"role": "user", "content": "Can I ask a question?"} ] ``` 这是`add_generation_prompt=False`的结果,使用ChatML模板: ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> """ ``` 下面这是`add_generation_prompt=True`的结果: ```python tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) """<|im_start|>user Hi there!<|im_end|> <|im_start|>assistant Nice to meet you!<|im_end|> <|im_start|>user Can I ask a question?<|im_end|> <|im_start|>assistant """ ``` 这一次我们添加了模型开始答复的标记。这可以确保模型生成文本时只会给出答复,而不会做出意外的行为,比如继续用户的消息。 记住,聊天模型只是语言模型,它们被训练来继续文本,而聊天对它们来说只是一种特殊的文本! 你需要用适当的控制标记来引导它们,让它们知道自己应该做什么。 并非所有模型都需要生成提示。一些模型,如BlenderBot和LLaMA,在模型回复之前没有任何特殊标记。 在这些情况下,`add_generation_prompt`参数将不起作用。`add_generation_prompt`参数取决于你所使用的模板。 ## 我可以在训练中使用聊天模板吗? 可以!我们建议您将聊天模板应用为数据集的预处理步骤。之后,您可以像进行任何其他语言模型训练任务一样继续。 在训练时,通常应该设置`add_generation_prompt=False`,因为添加的助手标记在训练过程中并不会有帮助。 让我们看一个例子: ```python from transformers import AutoTokenizer from datasets import Dataset tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta") chat1 = [ {"role": "user", "content": "Which is bigger, the moon or the sun?"}, {"role": "assistant", "content": "The sun."} ] chat2 = [ {"role": "user", "content": "Which is bigger, a virus or a bacterium?"}, {"role": "assistant", "content": "A bacterium."} ] dataset = Dataset.from_dict({"chat": [chat1, chat2]}) dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}) print(dataset['formatted_chat'][0]) ``` 结果是: ```text <|user|> Which is bigger, the moon or the sun?</s> <|assistant|> The sun.</s> ``` 这样,后面你可以使用`formatted_chat`列,跟标准语言建模任务中一样训练即可。 ## 高级:聊天模板是如何工作的? 模型的聊天模板存储在`tokenizer.chat_template`属性上。如果没有设置,则将使用该模型的默认模板。 让我们来看看`BlenderBot`的模板: ```python >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") >>> tokenizer.default_chat_template "{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}" ``` 这看着有点复杂。让我们添加一些换行和缩进,使其更易读。 请注意,默认情况下忽略每个块后的第一个换行以及块之前的任何前导空格, 使用Jinja的`trim_blocks`和`lstrip_blocks`标签。 这里,请注意空格的使用。我们强烈建议您仔细检查模板是否打印了多余的空格! ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ ' ' }} {% endif %} {{ message['content'] }} {% if not loop.last %} {{ ' ' }} {% endif %} {% endfor %} {{ eos_token }} ``` 如果你之前不了解[Jinja template](https://jinja.palletsprojects.com/en/3.1.x/templates/)。 Jinja是一种模板语言,允许你编写简单的代码来生成文本。 在许多方面,代码和语法类似于Python。在纯Python中,这个模板看起来会像这样: ```python for idx, message in enumerate(messages): if message['role'] == 'user': print(' ') print(message['content']) if not idx == len(messages) - 1: # Check for the last message in the conversation print(' ') print(eos_token) ``` 这里使用Jinja模板处理如下三步: 1. 对于每条消息,如果消息是用户消息,则在其前面添加一个空格,否则不打印任何内容 2. 添加消息内容 3. 如果消息不是最后一条,请在其后添加两个空格。在最后一条消息之后,打印`EOS`。 这是一个简单的模板,它不添加任何控制tokens,也不支持`system`消息(常用于指导模型在后续对话中如何表现)。 但 Jinja 给了你很大的灵活性来做这些事情!让我们看一个 Jinja 模板, 它可以实现类似于LLaMA的prompt输入(请注意,真正的LLaMA模板包括`system`消息,请不要在实际代码中使用这个简单模板!) 
``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'] + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ ' ' + message['content'] + ' ' + eos_token }} {% endif %} {% endfor %} ``` 这里稍微看一下,就能明白这个模板的作用:它根据每条消息的“角色”添加对应的消息。 `user`、`assistant`、`system`的消息需要分别处理,因为它们代表不同的角色输入。 ## 高级:编辑聊天模板 ### 如何创建聊天模板? 很简单,你只需编写一个jinja模板并设置`tokenizer.chat_template`。你也可以从一个现有模板开始,只需要简单编辑便可以! 例如,我们可以采用上面的LLaMA模板,并在助手消息中添加"[ASST]"和"[/ASST]": ``` {% for message in messages %} {% if message['role'] == 'user' %} {{ bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }} {% elif message['role'] == 'system' %} {{ '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }} {% elif message['role'] == 'assistant' %} {{ '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }} {% endif %} {% endfor %} ``` 现在,只需设置`tokenizer.chat_template`属性。下次使用[`~PreTrainedTokenizer.apply_chat_template`]时,它将使用您的新模板! 此属性将保存在`tokenizer_config.json`文件中,因此您可以使用[`~utils.PushToHubMixin.push_to_hub`]将新模板上传到 Hub, 这样每个人都可以使用你模型的模板! ```python template = tokenizer.chat_template template = template.replace("SYS", "SYSTEM") # Change the system token tokenizer.chat_template = template # Set the new template tokenizer.push_to_hub("model_name") # Upload your new template to the Hub! ``` 由于[`~PreTrainedTokenizer.apply_chat_template`]方法是由[`ConversationalPipeline`]类调用, 因此一旦你设置了聊天模板,您的模型将自动与[`ConversationalPipeline`]兼容。 ### “默认”模板是什么? 在引入聊天模板(chat_template)之前,聊天prompt是在模型中通过硬编码处理的。为了向前兼容,我们保留了这种硬编码处理聊天prompt的方法。 如果一个模型没有设置聊天模板,但其模型有默认模板,`ConversationalPipeline`类和`apply_chat_template`等方法将使用该模型的聊天模板。 您可以通过检查`tokenizer.default_chat_template`属性来查找`tokenizer`的默认模板。 这是我们纯粹为了向前兼容性而做的事情,以避免破坏任何现有的工作流程。即使默认的聊天模板适用于您的模型, 我们强烈建议通过显式设置`chat_template`属性来覆盖默认模板,以便向用户清楚地表明您的模型已经正确的配置了聊天模板, 并且为了未来防范默认模板被修改或弃用的情况。 ### 我应该使用哪个模板? 在为已经训练过的聊天模型设置模板时,您应确保模板与模型在训练期间看到的消息格式完全匹配,否则可能会导致性能下降。 即使您继续对模型进行训练,也应保持聊天模板不变,这样可能会获得最佳性能。 这与`tokenization`非常类似,在推断时,你选用跟训练时一样的`tokenization`,通常会获得最佳性能。 如果您从头开始训练模型,或者在微调基础语言模型进行聊天时,您有很大的自由选择适当的模板! LLMs足够聪明,可以学会处理许多不同的输入格式。我们为没有特定类别模板的模型提供一个默认模板,该模板遵循 `ChatML` format格式要求,对于许多用例来说, 这是一个很好的、灵活的选择。 默认模板看起来像这样: ``` {% for message in messages %} {{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}} {% endfor %} ``` 如果您喜欢这个模板,下面是一行代码的模板形式,它可以直接复制到您的代码中。这一行代码还包括了[generation prompts](#什么是"generation prompts"?), 但请注意它不会添加`BOS`或`EOS`token。 如果您的模型需要这些token,它们不会被`apply_chat_template`自动添加,换句话说,文本的默认处理参数是`add_special_tokens=False`。 这是为了避免模板和`add_special_tokens`逻辑产生冲突,如果您的模型需要特殊tokens,请确保将它们添加到模板中! ``` tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" ``` 该模板将每条消息包装在`<|im_start|>`和`<|im_end|>`tokens里面,并将角色简单地写为字符串,这样可以灵活地训练角色。输出如下: ```text <|im_start|>system You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|> <|im_start|>user How are you?<|im_end|> <|im_start|>assistant I'm doing great!<|im_end|> ``` `user`,`system`和`assistant`是对话助手模型的标准角色,如果您的模型要与[`ConversationalPipeline`]兼容,我们建议你使用这些角色。 但您可以不局限于这些角色,模板非常灵活,任何字符串都可以成为角色。 ### 如何添加聊天模板? 
如果您有任何聊天模型,您应该设置它们的`tokenizer.chat_template`属性,并使用[`~PreTrainedTokenizer.apply_chat_template`]测试, 然后将更新后的`tokenizer`推送到 Hub。 即使您不是模型所有者,如果您正在使用一个空的聊天模板或者仍在使用默认的聊天模板, 请发起一个[pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions),以便正确设置该属性! 一旦属性设置完成,就完成了!`tokenizer.apply_chat_template`现在将在该模型中正常工作, 这意味着它也会自动支持在诸如`ConversationalPipeline`的地方! 通过确保模型具有这一属性,我们可以确保整个社区都能充分利用开源模型的全部功能。 格式不匹配已经困扰这个领域并悄悄地损害了性能太久了,是时候结束它们了! ## 高级:模板写作技巧 如果你对Jinja不熟悉,我们通常发现编写聊天模板的最简单方法是先编写一个简短的Python脚本,按照你想要的方式格式化消息,然后将该脚本转换为模板。 请记住,模板处理程序将接收对话历史作为名为`messages`的变量。每条`message`都是一个带有两个键`role`和`content`的字典。 您可以在模板中像在Python中一样访问`messages`,这意味着您可以使用`{% for message in messages %}`进行循环, 或者例如使用`{{ messages[0] }}`访问单个消息。 您也可以使用以下提示将您的代码转换为Jinja: ### For循环 在Jinja中,for循环看起来像这样: ``` {% for message in messages %} {{ message['content'] }} {% endfor %} ``` 请注意,`{{ expression block }}`中的内容将被打印到输出。您可以在表达式块中使用像`+`这样的运算符来组合字符串。 ### If语句 Jinja中的if语句如下所示: ``` {% if message['role'] == 'user' %} {{ message['content'] }} {% endif %} ``` 注意Jinja使用`{% endfor %}`和`{% endif %}`来表示`for`和`if`的结束。 ### 特殊变量 在您的模板中,您将可以访问`messages`列表,但您还可以访问其他几个特殊变量。 这些包括特殊`token`,如`bos_token`和`eos_token`,以及我们上面讨论过的`add_generation_prompt`变量。 您还可以使用`loop`变量来访问有关当前循环迭代的信息,例如使用`{% if loop.last %}`来检查当前消息是否是对话中的最后一条消息。 以下是一个示例,如果`add_generation_prompt=True`需要在对话结束时添加`generate_prompt`: ``` {% if loop.last and add_generation_prompt %} {{ bos_token + 'Assistant:\n' }} {% endif %} ``` ### 空格的注意事项 我们已经尽可能尝试让Jinja忽略除`{{ expressions }}`之外的空格。 然而,请注意Jinja是一个通用的模板引擎,它可能会将同一行文本块之间的空格视为重要,并将其打印到输出中。 我们**强烈**建议在上传模板之前检查一下,确保模板没有在不应该的地方打印额外的空格!
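作为上面关于空格注意事项的补充,下面是一个简单的自查方法(仅作示意,其中的模型名称和模板内容均为假设):把模板赋值给 `tokenizer.chat_template` 之后,用 `repr()` 打印 `apply_chat_template(..., tokenize=False)` 的结果,就能直观地看到是否混入了多余的空格或换行。

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")  # 模型名称仅作示例

# 一个 ChatML 风格的简单模板(示意用)
tokenizer.chat_template = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
)

messages = [{"role": "user", "content": "Hi there!"}]

# repr() 会把空格和换行显式打印出来,方便检查模板输出
print(repr(tokenizer.apply_chat_template(messages, tokenize=False)))
```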
transformers/docs/source/zh/chat_templating.md/0
{ "file_path": "transformers/docs/source/zh/chat_templating.md", "repo_id": "transformers", "token_count": 11163 }
269
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 模型输出 所有模型的输出都是 [`~utils.ModelOutput`] 的子类的实例。这些是包含模型返回的所有信息的数据结构,但也可以用作元组或字典。 让我们看一个例子: ```python from transformers import BertTokenizer, BertForSequenceClassification import torch tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") model = BertForSequenceClassification.from_pretrained("google-bert/bert-base-uncased") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 outputs = model(**inputs, labels=labels) ``` `outputs` 对象是 [`~modeling_outputs.SequenceClassifierOutput`],如下面该类的文档中所示,它表示它有一个可选的 `loss`,一个 `logits`,一个可选的 `hidden_states` 和一个可选的 `attentions` 属性。在这里,我们有 `loss`,因为我们传递了 `labels`,但我们没有 `hidden_states` 和 `attentions`,因为我们没有传递 `output_hidden_states=True` 或 `output_attentions=True`。 <Tip> 当传递 `output_hidden_states=True` 时,您可能希望 `outputs.hidden_states[-1]` 与 `outputs.last_hidden_state` 完全匹配。然而,这并不总是成立。一些模型在返回最后的 hidden state时对其应用归一化或其他后续处理。 </Tip> 您可以像往常一样访问每个属性,如果模型未返回该属性,您将得到 `None`。在这里,例如,`outputs.loss` 是模型计算的损失,而 `outputs.attentions` 是 `None`。 当将我们的 `outputs` 对象视为元组时,它仅考虑那些没有 `None` 值的属性。例如这里它有两个元素,`loss` 和 `logits`,所以 ```python outputs[:2] ``` 将返回元组 `(outputs.loss, outputs.logits)`。 将我们的 `outputs` 对象视为字典时,它仅考虑那些没有 `None` 值的属性。例如在这里它有两个键,分别是 `loss` 和 `logits`。 我们在这里记录了被多个类型模型使用的通用模型输出。特定输出类型在其相应的模型页面上有文档。 ## ModelOutput [[autodoc]] utils.ModelOutput - to_tuple ## BaseModelOutput [[autodoc]] modeling_outputs.BaseModelOutput ## BaseModelOutputWithPooling [[autodoc]] modeling_outputs.BaseModelOutputWithPooling ## BaseModelOutputWithCrossAttentions [[autodoc]] modeling_outputs.BaseModelOutputWithCrossAttentions ## BaseModelOutputWithPoolingAndCrossAttentions [[autodoc]] modeling_outputs.BaseModelOutputWithPoolingAndCrossAttentions ## BaseModelOutputWithPast [[autodoc]] modeling_outputs.BaseModelOutputWithPast ## BaseModelOutputWithPastAndCrossAttentions [[autodoc]] modeling_outputs.BaseModelOutputWithPastAndCrossAttentions ## Seq2SeqModelOutput [[autodoc]] modeling_outputs.Seq2SeqModelOutput ## CausalLMOutput [[autodoc]] modeling_outputs.CausalLMOutput ## CausalLMOutputWithCrossAttentions [[autodoc]] modeling_outputs.CausalLMOutputWithCrossAttentions ## CausalLMOutputWithPast [[autodoc]] modeling_outputs.CausalLMOutputWithPast ## MaskedLMOutput [[autodoc]] modeling_outputs.MaskedLMOutput ## Seq2SeqLMOutput [[autodoc]] modeling_outputs.Seq2SeqLMOutput ## NextSentencePredictorOutput [[autodoc]] modeling_outputs.NextSentencePredictorOutput ## SequenceClassifierOutput [[autodoc]] modeling_outputs.SequenceClassifierOutput ## Seq2SeqSequenceClassifierOutput [[autodoc]] modeling_outputs.Seq2SeqSequenceClassifierOutput ## MultipleChoiceModelOutput [[autodoc]] modeling_outputs.MultipleChoiceModelOutput ## TokenClassifierOutput [[autodoc]]
modeling_outputs.TokenClassifierOutput ## QuestionAnsweringModelOutput [[autodoc]] modeling_outputs.QuestionAnsweringModelOutput ## Seq2SeqQuestionAnsweringModelOutput [[autodoc]] modeling_outputs.Seq2SeqQuestionAnsweringModelOutput ## Seq2SeqSpectrogramOutput [[autodoc]] modeling_outputs.Seq2SeqSpectrogramOutput ## SemanticSegmenterOutput [[autodoc]] modeling_outputs.SemanticSegmenterOutput ## ImageClassifierOutput [[autodoc]] modeling_outputs.ImageClassifierOutput ## ImageClassifierOutputWithNoAttention [[autodoc]] modeling_outputs.ImageClassifierOutputWithNoAttention ## DepthEstimatorOutput [[autodoc]] modeling_outputs.DepthEstimatorOutput ## Wav2Vec2BaseModelOutput [[autodoc]] modeling_outputs.Wav2Vec2BaseModelOutput ## XVectorOutput [[autodoc]] modeling_outputs.XVectorOutput ## Seq2SeqTSModelOutput [[autodoc]] modeling_outputs.Seq2SeqTSModelOutput ## Seq2SeqTSPredictionOutput [[autodoc]] modeling_outputs.Seq2SeqTSPredictionOutput ## SampleTSPredictionOutput [[autodoc]] modeling_outputs.SampleTSPredictionOutput ## TFBaseModelOutput [[autodoc]] modeling_tf_outputs.TFBaseModelOutput ## TFBaseModelOutputWithPooling [[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPooling ## TFBaseModelOutputWithPoolingAndCrossAttentions [[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPoolingAndCrossAttentions ## TFBaseModelOutputWithPast [[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPast ## TFBaseModelOutputWithPastAndCrossAttentions [[autodoc]] modeling_tf_outputs.TFBaseModelOutputWithPastAndCrossAttentions ## TFSeq2SeqModelOutput [[autodoc]] modeling_tf_outputs.TFSeq2SeqModelOutput ## TFCausalLMOutput [[autodoc]] modeling_tf_outputs.TFCausalLMOutput ## TFCausalLMOutputWithCrossAttentions [[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithCrossAttentions ## TFCausalLMOutputWithPast [[autodoc]] modeling_tf_outputs.TFCausalLMOutputWithPast ## TFMaskedLMOutput [[autodoc]] modeling_tf_outputs.TFMaskedLMOutput ## TFSeq2SeqLMOutput [[autodoc]] modeling_tf_outputs.TFSeq2SeqLMOutput ## TFNextSentencePredictorOutput [[autodoc]] modeling_tf_outputs.TFNextSentencePredictorOutput ## TFSequenceClassifierOutput [[autodoc]] modeling_tf_outputs.TFSequenceClassifierOutput ## TFSeq2SeqSequenceClassifierOutput [[autodoc]] modeling_tf_outputs.TFSeq2SeqSequenceClassifierOutput ## TFMultipleChoiceModelOutput [[autodoc]] modeling_tf_outputs.TFMultipleChoiceModelOutput ## TFTokenClassifierOutput [[autodoc]] modeling_tf_outputs.TFTokenClassifierOutput ## TFQuestionAnsweringModelOutput [[autodoc]] modeling_tf_outputs.TFQuestionAnsweringModelOutput ## TFSeq2SeqQuestionAnsweringModelOutput [[autodoc]] modeling_tf_outputs.TFSeq2SeqQuestionAnsweringModelOutput ## FlaxBaseModelOutput [[autodoc]] modeling_flax_outputs.FlaxBaseModelOutput ## FlaxBaseModelOutputWithPast [[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPast ## FlaxBaseModelOutputWithPooling [[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPooling ## FlaxBaseModelOutputWithPastAndCrossAttentions [[autodoc]] modeling_flax_outputs.FlaxBaseModelOutputWithPastAndCrossAttentions ## FlaxSeq2SeqModelOutput [[autodoc]] modeling_flax_outputs.FlaxSeq2SeqModelOutput ## FlaxCausalLMOutputWithCrossAttentions [[autodoc]] modeling_flax_outputs.FlaxCausalLMOutputWithCrossAttentions ## FlaxMaskedLMOutput [[autodoc]] modeling_flax_outputs.FlaxMaskedLMOutput ## FlaxSeq2SeqLMOutput [[autodoc]] modeling_flax_outputs.FlaxSeq2SeqLMOutput ## FlaxNextSentencePredictorOutput [[autodoc]] modeling_flax_outputs.FlaxNextSentencePredictorOutput ## 
FlaxSequenceClassifierOutput [[autodoc]] modeling_flax_outputs.FlaxSequenceClassifierOutput ## FlaxSeq2SeqSequenceClassifierOutput [[autodoc]] modeling_flax_outputs.FlaxSeq2SeqSequenceClassifierOutput ## FlaxMultipleChoiceModelOutput [[autodoc]] modeling_flax_outputs.FlaxMultipleChoiceModelOutput ## FlaxTokenClassifierOutput [[autodoc]] modeling_flax_outputs.FlaxTokenClassifierOutput ## FlaxQuestionAnsweringModelOutput [[autodoc]] modeling_flax_outputs.FlaxQuestionAnsweringModelOutput ## FlaxSeq2SeqQuestionAnsweringModelOutput [[autodoc]] modeling_flax_outputs.FlaxSeq2SeqQuestionAnsweringModelOutput
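最后,承接本页开头的示例(仅作演示),下面展示几种等价的访问方式:字符串键、整数索引(会跳过值为 `None` 的属性)以及 `to_tuple()`:

```python
# 续前文的 `outputs`(SequenceClassifierOutput,包含 loss 和 logits)
logits = outputs["logits"]          # 像字典一样用字符串键访问
same_logits = outputs[1]            # 像元组一样用整数索引访问(索引 0 是 loss)
loss, logits = outputs.to_tuple()   # 转换为普通元组后解包
```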
transformers/docs/source/zh/main_classes/output.md/0
{ "file_path": "transformers/docs/source/zh/main_classes/output.md", "repo_id": "transformers", "token_count": 3318 }
270
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 使用脚本进行训练 除了 🤗 Transformers [notebooks](./notebooks/README),还有示例脚本演示了如何使用[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch)、[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow)或[JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax)训练模型以解决特定任务。 您还可以在这些示例中找到我们在[研究项目](https://github.com/huggingface/transformers/tree/main/examples/research_projects)和[遗留示例](https://github.com/huggingface/transformers/tree/main/examples/legacy)中使用过的脚本,这些脚本主要是由社区贡献的。这些脚本已不再被积极维护,需要使用特定版本的🤗 Transformers, 可能与库的最新版本不兼容。 示例脚本可能无法在初始配置下直接解决每个问题,您可能需要根据要解决的问题调整脚本。为了帮助您,大多数脚本都完全暴露了数据预处理的方式,允许您根据需要对其进行编辑。 如果您想在示例脚本中实现任何功能,请在[论坛](https://discuss.huggingface.co/)或[issue](https://github.com/huggingface/transformers/issues)上讨论,然后再提交Pull Request。虽然我们欢迎修复错误,但不太可能合并添加更多功能的Pull Request,因为这会降低可读性。 本指南将向您展示如何在[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization)和[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization)中运行示例摘要训练脚本。除非另有说明,否则所有示例都可以在两个框架中工作。 ## 设置 要成功运行示例脚本的最新版本,您必须在新虚拟环境中**从源代码安装 🤗 Transformers**: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install .
``` 对于旧版本的示例脚本,请点击下面的切换按钮: <details> <summary>老版本🤗 Transformers示例 </summary> <ul> <li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li> </ul> </details> 然后切换您clone的 🤗 Transformers 仓到特定的版本,例如v3.5.1: ```bash git checkout tags/v3.5.1 ``` 在安装了正确的库版本后,进入您选择的版本的`example`文件夹并安装例子要求的环境: ```bash pip install -r requirements.txt ``` ## 运行脚本 <frameworkcontent> <pt> 示例脚本从🤗 [Datasets](https://huggingface.co/docs/datasets/)库下载并预处理数据集。然后,脚本通过[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer)使用支持摘要任务的架构对数据集进行微调。以下示例展示了如何在[CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail)数据集上微调[T5-small](https://huggingface.co/google-t5/t5-small)。由于T5模型的训练方式,它需要一个额外的`source_prefix`参数。这个提示让T5知道这是一个摘要任务。 ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 
\ --overwrite_output_dir \ --predict_with_generate ``` </pt> <tf> 示例脚本从 🤗 [Datasets](https://huggingface.co/docs/datasets/) 库下载并预处理数据集。然后,脚本使用 Keras 在支持摘要的架构上微调数据集。以下示例展示了如何在 [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) 数据集上微调 [T5-small](https://huggingface.co/google-t5/t5-small)。T5 模型由于训练方式需要额外的 `source_prefix` 参数。这个提示让 T5 知道这是一个摘要任务。 ```bash python examples/tensorflow/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 16 \ --num_train_epochs 3 \ --do_train \ --do_eval ``` </tf> </frameworkcontent> ## 分布式训练和混合精度 [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) 支持分布式训练和混合精度,这意味着你也可以在脚本中使用它。要启用这两个功能,可以做如下设置: - 添加 `fp16` 参数以启用混合精度。 - 使用 `nproc_per_node` 参数设置使用的GPU数量。 ```bash torchrun \ --nproc_per_node 8 pytorch/summarization/run_summarization.py \ --fp16 \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` TensorFlow脚本使用[`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy)进行分布式训练,您无需在训练脚本中添加任何其他参数。如果可用,TensorFlow脚本将默认使用多个GPU。 ## 在TPU上运行脚本 <frameworkcontent> <pt> 张量处理单元(TPUs)是专门设计用于加速性能的。PyTorch使用[XLA](https://www.tensorflow.org/xla)深度学习编译器支持TPU(更多细节请参见[这里](https://github.com/pytorch/xla/blob/master/README.md))。要使用TPU,请启动`xla_spawn.py`脚本并使用`num_cores`参数设置要使用的TPU核心数量。 ```bash python xla_spawn.py --num_cores 8 \ summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` </pt> <tf> 张量处理单元(TPUs)是专门设计用于加速性能的。TensorFlow脚本使用[`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy)在TPU上进行训练。要使用TPU,请将TPU资源的名称传递给`tpu`参数。 ```bash python run_summarization.py \ --tpu name_of_tpu_resource \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 16 \ --num_train_epochs 3 \ --do_train \ --do_eval ``` </tf> </frameworkcontent> ## 基于🤗 Accelerate运行脚本 🤗 [Accelerate](https://huggingface.co/docs/accelerate) 是一个仅支持 PyTorch 的库,它提供了一种统一的方法来在不同类型的设置(仅 CPU、多个 GPU、多个TPU)上训练模型,同时保持对 PyTorch 训练循环的完全可见性。如果你还没有安装 🤗 Accelerate,请确保你已经安装了它: > 注意:由于 Accelerate 正在快速发展,因此必须安装 git 版本的 accelerate 来运行脚本。 ```bash pip install git+https://github.com/huggingface/accelerate ``` 你需要使用`run_summarization_no_trainer.py`脚本,而不是`run_summarization.py`脚本。🤗 Accelerate支持的脚本需要在文件夹中有一个`task_no_trainer.py`文件。首先运行以下命令以创建并保存配置文件: ```bash accelerate config ``` 检测您的设置以确保配置正确: ```bash accelerate test ``` 现在您可以开始训练模型了: ```bash accelerate launch run_summarization_no_trainer.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir ~/tmp/tst-summarization ``` ## 使用自定义数据集 摘要脚本支持自定义数据集,只要它们是CSV或JSON Line文件。当你使用自己的数据集时,需要指定一些额外的参数: - `train_file` 和 `validation_file` 分别指定您的训练和验证文件的路径。 - 
`text_column` 是输入要进行摘要的文本。 - `summary_column` 是目标输出的文本。 使用自定义数据集的摘要脚本看起来是这样的: ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --train_file path_to_csv_or_jsonlines_file \ --validation_file path_to_csv_or_jsonlines_file \ --text_column text_column_name \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate ``` ## 测试脚本 通常,在提交整个数据集之前,最好先在较少的数据集示例上运行脚本,以确保一切按预期工作,因为完整数据集的处理可能需要花费几个小时的时间。使用以下参数将数据集截断为最大样本数: - `max_train_samples` - `max_eval_samples` - `max_predict_samples` ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --max_train_samples 50 \ --max_eval_samples 50 \ --max_predict_samples 50 \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` 并非所有示例脚本都支持`max_predict_samples`参数。如果您不确定您的脚本是否支持此参数,请添加`-h`参数进行检查: ```bash examples/pytorch/summarization/run_summarization.py -h ``` ## 从checkpoint恢复训练 另一个有用的选项是从之前的checkpoint恢复训练。这将确保在训练中断时,您可以从之前停止的地方继续进行,而无需重新开始。有两种方法可以从checkpoint恢复训练。 第一种方法使用`output_dir previous_output_dir`参数从存储在`output_dir`中的最新的checkpoint恢复训练。在这种情况下,您应该删除`overwrite_output_dir`: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --output_dir previous_output_dir \ --predict_with_generate ``` 第二种方法使用`resume_from_checkpoint path_to_specific_checkpoint`参数从特定的checkpoint文件夹恢复训练。 ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` ## 分享模型 所有脚本都可以将您的最终模型上传到[Model Hub](https://huggingface.co/models)。在开始之前,请确保您已登录Hugging Face: ```bash huggingface-cli login ``` 然后,在脚本中添加`push_to_hub`参数。这个参数会创建一个带有您Hugging Face用户名和`output_dir`中指定的文件夹名称的仓库。 为了给您的仓库指定一个特定的名称,使用`push_to_hub_model_id`参数来添加它。该仓库将自动列出在您的命名空间下。 以下示例展示了如何上传具有特定仓库名称的模型: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ```
transformers/docs/source/zh/run_scripts.md/0
{ "file_path": "transformers/docs/source/zh/run_scripts.md", "repo_id": "transformers", "token_count": 8305 }
271
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library vision-encoder-decoder models for image captioning. """ import json import logging import os import sys import time import warnings from dataclasses import asdict, dataclass, field from enum import Enum from functools import partial from pathlib import Path from typing import Callable, Optional import datasets import evaluate import jax import jax.numpy as jnp import nltk # Here to have a nice missing dependency error message early on import numpy as np import optax from datasets import Dataset, load_dataset from filelock import FileLock from flax import jax_utils, traverse_util from flax.jax_utils import unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import HfApi from PIL import Image from tqdm import tqdm import transformers from transformers import ( AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import is_offline_mode, send_example_telemetry logger = logging.getLogger(__name__) try: nltk.data.find("tokenizers/punkt") except (LookupError, OSError): if is_offline_mode(): raise LookupError( "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" ) with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = np.zeros_like(input_ids) shifted_input_ids[:, 1:] = input_ids[:, :-1] shifted_input_ids[:, 0] = decoder_start_token_id shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids @dataclass class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." 
) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) _block_size_doc = """ The default value `0` will preprocess (tokenization + image processing) the whole dataset before training and cache the results. This uses more disk space, but avoids (repeated) processing time during training. This is a good option if your disk space is large enough to store the whole processed dataset. If a positive value is given, the captions in the dataset will be tokenized before training and the results are cached. During training, it iterates the dataset in chunks of size `block_size`. On each block, images are transformed by the image processor with the results being kept in memory (no cache), and batches of size `batch_size` are yielded before processing the next block. This could avoid the heavy disk usage when the dataset is large. """ block_size: int = field(default=0, metadata={"help": _block_size_doc}) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) label_smoothing_factor: float = field( default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."} ) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. 
""" model_name_or_path: str = field( metadata={"help": "The model checkpoint for weights initialization."}, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) data_dir: Optional[str] = field( default=None, metadata={"help": "The data directory of the dataset to use (via the datasets library)."} ) image_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the full image file paths."}, ) caption_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the image captions."}, ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input predict data file to do prediction on (a text file)."}, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the `max_length` param of `model.generate`, which is used " "during evaluation." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to `model.generate`, " "which is used during evaluation." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] if extension not in ["csv", "json"]: raise ValueError(f"`train_file` should be a csv or a json file, got {extension}.") if self.validation_file is not None: extension = self.validation_file.split(".")[-1] if extension not in ["csv", "json"]: raise ValueError(f"`validation_file` should be a csv or a json file, got {extension}.") if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length image_captioning_name_mapping = { "image_caption_dataset.py": ("image_path", "caption"), } class TrainState(train_state.TrainState): dropout_rng: jnp.ndarray def replicate(self): return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False): """ Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`. """ steps = len(dataset) // batch_size # Skip incomplete batch. # We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a # dataset is significantly slow. Using JAX array at the 1st place is only to keep JAX's PRNGs generation # mechanism, which works differently from NumPy/SciPy. 
if shuffle: batch_idx = jax.random.permutation(rng, len(dataset)) batch_idx = np.asarray(batch_idx) else: batch_idx = np.arange(len(dataset)) for idx in range(steps): start_idx = batch_size * idx end_idx = batch_size * (idx + 1) selected_indices = batch_idx[start_idx:end_idx] batch = dataset[selected_indices] batch = shard(batch) yield batch def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"): if train_time: summary_writer.scalar("train_time", train_time, step) metrics = get_metrics(metrics) for key, vals in metrics.items(): tag = f"{metric_key_prefix}_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) else: for metric_name, value in metrics.items(): summary_writer.scalar(f"{metric_key_prefix}_{metric_name}", value, step) def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.ndarray]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_image_captioning", model_args, data_args, framework="flax") if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. 
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Handle the repository creation if training_args.push_to_hub: # Retrieve of infer repo_name repo_name = training_args.hub_model_id if repo_name is None: repo_name = Path(training_args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full image path and the second column for the # captions (unless you specify column names for this with the `image_column` and `caption_column` arguments). # if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False, data_dir=data_args.data_dir, token=model_args.token, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] dataset = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer model = FlaxVisionEncoderDecoderModel.from_pretrained( model_args.model_name_or_path, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) image_processor = AutoImageProcessor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id) # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: column_names = dataset["train"].column_names elif training_args.do_eval: column_names = dataset["validation"].column_names elif training_args.do_predict: column_names = dataset["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Get the column names for input/target. 
dataset_columns = image_captioning_name_mapping.get(data_args.dataset_name, None) if data_args.image_column is None: if dataset_columns is None: raise ValueError( f"`--dataset_name` {data_args.dataset_name} not found in dataset '{data_args.dataset_name}'. Make sure" " to set `--dataset_name` to the correct dataset name, one of" f" {', '.join(image_captioning_name_mapping.keys())}." ) image_column = dataset_columns[0] else: image_column = data_args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.caption_column is None: if dataset_columns is None: raise ValueError( f"`--dataset_name` {data_args.dataset_name} not found in dataset '{data_args.dataset_name}'. Make sure" " to set `--dataset_name` to the correct dataset name, one of" f" {', '.join(image_captioning_name_mapping.keys())}." ) caption_column = dataset_columns[1] else: caption_column = data_args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # In Flax, for seq2seq models we need to pass `decoder_input_ids` # as the Flax models don't accept `labels`, we need to prepare the decoder_input_ids here # for that dynamically import the `shift_tokens_right` function from the model file model_module = __import__(model.__module__, fromlist=["shift_tokens_right"]) shift_tokens_right_fn = getattr(model_module, "shift_tokens_right", shift_tokens_right) def filter_fn(examples): """remove problematic images""" bools = [] for image_file in examples[image_column]: try: image = Image.open(image_file) image_processor(images=image, return_tensors="np") bools.append(True) except Exception: bools.append(False) return bools # Setting padding="max_length" as we need fixed length inputs for jitted functions def tokenization_fn(examples, max_target_length): """Run tokenization on captions.""" captions = [] for caption in examples[caption_column]: captions.append(caption.lower() + " " + tokenizer.eos_token) targets = captions model_inputs = {} labels = tokenizer( text_target=targets, max_length=max_target_length, padding="max_length", truncation=True, return_tensors="np", ) model_inputs["labels"] = labels["input_ids"] decoder_input_ids = shift_tokens_right_fn( labels["input_ids"], model.config.pad_token_id, model.config.decoder_start_token_id ) model_inputs["decoder_input_ids"] = np.asarray(decoder_input_ids) # We need decoder_attention_mask so we can ignore pad tokens from loss model_inputs["decoder_attention_mask"] = labels["attention_mask"] model_inputs[image_column] = examples[image_column] return model_inputs def image_processing_fn(examples, check_image=True): """ Run preprocessing on images If `check_image` is `True`, the examples that fails during `Image.open()` will be caught and discarded. Otherwise, an exception will be thrown. 
""" model_inputs = {} if check_image: images = [] to_keep = [] for image_file in examples[image_column]: try: img = Image.open(image_file) images.append(img) to_keep.append(True) except Exception: to_keep.append(False) for k, v in examples.items(): if k != image_column: model_inputs[k] = v[to_keep] else: images = [Image.open(image_file) for image_file in examples[image_column]] encoder_inputs = image_processor(images=images, return_tensors="np") model_inputs["pixel_values"] = encoder_inputs.pixel_values return model_inputs def preprocess_fn(examples, max_target_length, check_image=True): """Run tokenization + image processing""" model_inputs = {} # This contains image path column model_inputs.update(tokenization_fn(examples, max_target_length)) model_inputs.update(image_processing_fn(model_inputs, check_image=check_image)) # Remove image path column model_inputs.pop(image_column) return model_inputs features = datasets.Features( { "pixel_values": datasets.Array3D( shape=( getattr(model.config.encoder, "num_channels", 3), model.config.encoder.image_size, model.config.encoder.image_size, ), dtype="float32", ), "labels": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None), "decoder_input_ids": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None), "decoder_attention_mask": datasets.Sequence( feature=datasets.Value(dtype="int32", id=None), length=-1, id=None ), } ) # If `block_size` is `0`, tokenization & image processing is done at the beginning run_img_proc_at_beginning = training_args.block_size == 0 # Used in .map() below function_kwarg = preprocess_fn if run_img_proc_at_beginning else tokenization_fn # `features` is used only for the final preprocessed dataset (for the performance purpose). features_kwarg = features if run_img_proc_at_beginning else None # Keep `image_column` if the image processing is done during training remove_columns_kwarg = [x for x in column_names if x != image_column or run_img_proc_at_beginning] processor_names = "tokenizer and image processor" if run_img_proc_at_beginning else "tokenizer" # Store some constant train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() if training_args.block_size % train_batch_size > 0 or training_args.block_size % eval_batch_size > 0: raise ValueError( "`training_args.block_size` needs to be a multiple of the global train/eval batch size. " f"Got {training_args.block_size}, {train_batch_size} and {eval_batch_size} respectively instead." ) if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") train_dataset = dataset["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # remove problematic examples # (if image processing is performed at the beginning, the filtering is done during preprocessing below # instead here.) 
if not run_img_proc_at_beginning: train_dataset = train_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers) train_dataset = train_dataset.map( function=function_kwarg, batched=True, num_proc=data_args.preprocessing_num_workers, # kept image paths remove_columns=remove_columns_kwarg, load_from_cache_file=not data_args.overwrite_cache, desc=f"Running {processor_names} on train dataset", fn_kwargs={"max_target_length": data_args.max_target_length}, features=features_kwarg, ) if run_img_proc_at_beginning: # set format (for performance) since the dataset is ready to be used train_dataset = train_dataset.with_format("numpy") steps_per_epoch = len(train_dataset) // train_batch_size num_train_examples_per_epoch = steps_per_epoch * train_batch_size num_epochs = int(training_args.num_train_epochs) total_train_steps = steps_per_epoch * num_epochs else: num_train_examples_per_epoch = 0 if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") eval_dataset = dataset["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) # remove problematic examples # (if image processing is performed at the beginning, the filtering is done during preprocessing below # instead here.) if not run_img_proc_at_beginning: eval_dataset = eval_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers) eval_dataset = eval_dataset.map( function=function_kwarg, batched=True, num_proc=data_args.preprocessing_num_workers, # kept image paths remove_columns=remove_columns_kwarg, load_from_cache_file=not data_args.overwrite_cache, desc=f"Running {processor_names} on validation dataset", fn_kwargs={"max_target_length": data_args.val_max_target_length}, features=features_kwarg, ) if run_img_proc_at_beginning: # set format (for performance) since the dataset is ready to be used eval_dataset = eval_dataset.with_format("numpy") num_eval_examples = len(eval_dataset) eval_steps = num_eval_examples // eval_batch_size if training_args.do_predict: if "test" not in dataset: raise ValueError("--do_predict requires a test dataset") predict_dataset = dataset["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # remove problematic examples # (if image processing is performed at the beginning, the filtering is done during preprocessing below # instead here.) 
if not run_img_proc_at_beginning: predict_dataset = predict_dataset.filter( filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers ) predict_dataset = predict_dataset.map( function=function_kwarg, batched=True, num_proc=data_args.preprocessing_num_workers, # kept image paths remove_columns=remove_columns_kwarg, load_from_cache_file=not data_args.overwrite_cache, desc=f"Running {processor_names} on prediction dataset", fn_kwargs={"max_target_length": data_args.val_max_target_length}, features=features_kwarg, ) if run_img_proc_at_beginning: # set format (for performance) since the dataset is ready to be used predict_dataset = predict_dataset.with_format("numpy") num_test_examples = len(predict_dataset) test_steps = num_test_examples // eval_batch_size def blockwise_data_loader( rng: jax.random.PRNGKey, ds: Dataset, block_size: int, batch_size: int, shuffle: bool = False, keep_in_memory: bool = False, split: str = "", ): """ Wrap the simple `data_loader` in a block-wise way if `block_size` > 0, else it's the same as `data_loader`. If `block_size` > 0, it requires `ds` to have a column that gives image paths in order to perform image processing (with the column name being specified by `image_column`). The tokenization should be done before training in this case. """ # We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a # dataset is significantly slow. Using JAX array at the 1st place is only to keep JAX's PRNGs generation # mechanism, which works differently from NumPy/SciPy. if shuffle: indices = jax.random.permutation(rng, len(ds)) indices = np.asarray(indices) else: indices = np.arange(len(ds)) _block_size = len(ds) if not block_size else block_size steps_per_block = _block_size // batch_size num_examples = len(ds) steps = num_examples // batch_size num_splits = steps // steps_per_block + int(steps % steps_per_block > 0) for idx in range(num_splits): if not block_size: _ds = ds else: start_idx = block_size * idx end_idx = block_size * (idx + 1) selected_indices = indices[start_idx:end_idx] _ds = ds.select(selected_indices) _ds = _ds.map( image_processing_fn, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[image_column], load_from_cache_file=not data_args.overwrite_cache, features=features, keep_in_memory=keep_in_memory, # The images are already checked either in `.filter()` or in `preprocess_fn()` fn_kwargs={"check_image": False}, desc=f"Running image processing on {split} dataset".replace(" ", " "), ) _ds = _ds.with_format("numpy") # No need to shuffle here loader = data_loader(rng, _ds, batch_size=batch_size, shuffle=False) for batch in loader: yield batch # Metric metric = evaluate.load("rouge", cache_dir=model_args.cache_dir) def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [label.strip() for label in labels] # rougeLSum expects newline after each sentence preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds] labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] return preds, labels def compute_metrics(preds, labels): decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) # Extract a few results from ROUGE result = {key: value.mid.fmeasure 
* 100 for key, value in result.items()} prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] result["gen_len"] = np.mean(prediction_lens) result = {k: round(v, 6) for k, v in result.items()} return result, decoded_preds, decoded_labels # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." ) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) rng, dropout_rng = jax.random.split(rng) # Create learning rate schedule linear_decay_lr_schedule_fn = create_learning_rate_fn( num_train_examples_per_epoch, train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = { layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() } flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng) # label smoothed cross entropy def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0): """ The label smoothing implementation is adapted from Flax's official example: https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104 """ vocab_size = logits.shape[-1] confidence = 1.0 - label_smoothing_factor low_confidence = (1.0 - confidence) / (vocab_size - 1) normalizing_constant = -( confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20) ) soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence) loss = optax.softmax_cross_entropy(logits, soft_labels) loss = loss - normalizing_constant # ignore padded tokens from loss loss = loss * padding_mask loss = loss.sum() num_labels = padding_mask.sum() return loss, num_labels # Define gradient update step fn def train_step(state, batch, label_smoothing_factor=0.0): dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng) def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], 
label_smoothing_factor) return loss, num_labels grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics # Define eval fn def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) num_labels = jax.lax.psum(num_labels, "batch") # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {"loss": loss} return metrics # Define generation function max_length = ( data_args.val_max_target_length if data_args.val_max_target_length is not None else model.config.max_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else model.config.num_beams gen_kwargs = {"max_length": max_length, "num_beams": num_beams} def generate_step(params, batch): model.params = params output_ids = model.generate(batch["pixel_values"], **gen_kwargs) return output_ids.sequences # Create parallel version of the train and eval step p_train_step = jax.pmap( partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,) ) p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch") p_generate_step = jax.pmap(generate_step, "batch") # Replicate the train state on each device state = state.replicate() if training_args.do_train: logger.info("***** Running training *****") logger.info(f" Num train examples = {num_train_examples_per_epoch}") logger.info(f" Num Epochs = {num_epochs}") logger.info(f" Instantaneous train batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") logger.info(f" Optimization steps per epoch = {steps_per_epoch}") logger.info(f" Total optimization steps = {total_train_steps}") if training_args.do_eval: logger.info(f" Num evaluation examples = {num_eval_examples}") logger.info(f" Instantaneous evaluation batch size per device = {training_args.per_device_eval_batch_size}") logger.info(f" Total evaluation batch size (w. parallel & distributed) = {eval_batch_size}") logger.info(f" Evaluation steps = {eval_steps}") if training_args.do_predict: logger.info(f" Num test examples = {num_test_examples}") logger.info(f" Instantaneous test batch size per device = {training_args.per_device_eval_batch_size}") logger.info(f" Total test batch size (w. 
parallel & distributed) = {eval_batch_size}") logger.info(f" Test steps = {test_steps}") # create output directory if not os.path.isdir(os.path.join(training_args.output_dir)): os.makedirs(os.path.join(training_args.output_dir), exist_ok=True) def save_ckpt(ckpt_dir: str, commit_msg: str = ""): """save checkpoints and push to Hugging Face Hub if specified""" # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir), params=params) tokenizer.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir)) if training_args.push_to_hub: api.upload_folder( commit_message=commit_msg, folder_path=training_args.output_dir, repo_id=repo_id, repo_type="model", token=training_args.hub_token, ) def evaluation_loop( rng: jax.random.PRNGKey, dataset: Dataset, metric_key_prefix: str = "eval", ckpt_dir: str = "", is_prediction=False, ): logger.info(f"*** {'Predict' if is_prediction else 'Evaluate'} ***") metrics = [] preds = [] labels = [] batches = blockwise_data_loader( rng, dataset, block_size=training_args.block_size, batch_size=eval_batch_size, keep_in_memory=False, shuffle=False, split="prediction" if is_prediction else "validation", ) steps = len(dataset) // eval_batch_size for _ in tqdm( range(steps), desc=f"{'Predicting' if is_prediction else 'Evaluating'}...", position=2, leave=False ): # Model forward batch = next(batches) _labels = batch.get("labels", None) if not is_prediction and _labels is None: raise ValueError("Evaluation requires the validation dataset to have `labels`") if _labels is not None: _metrics = p_eval_step(state.params, batch) metrics.append(_metrics) # generation if data_args.predict_with_generate: generated_ids = p_generate_step(state.params, batch) preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"]))) if _labels is not None: labels.extend(jax.device_get(_labels.reshape(-1, _labels.shape[-1]))) if metrics: # normalize metrics metrics = get_metrics(metrics) metrics = jax.tree_util.tree_map(jnp.mean, metrics) # compute ROUGE metrics generations = [] rouge_desc = "" if data_args.predict_with_generate: if labels: rouge_metrics, decoded_preds, decoded_labels = compute_metrics(preds, labels) metrics.update(rouge_metrics) rouge_desc = " ".join( [ f"{'Predict' if is_prediction else 'Eval'} {key}: {value} |" for key, value in rouge_metrics.items() ] ) for pred, label in zip(decoded_preds, decoded_labels): pred = pred.replace("\n", " ") label = label.replace("\n", " ") generations.append({"label": label, "pred": pred}) else: decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # Some simple post-processing decoded_preds = [pred.strip() for pred in decoded_preds] # rougeLSum expects newline after each sentence decoded_preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in decoded_preds] for pred in decoded_preds: pred = pred.replace("\n", " ") generations.append({"pred": pred}) if metrics: # Print metrics and update progress bar desc = f"{'Predict' if is_prediction else 'Eval'} Loss: {metrics['loss']} | {rouge_desc})" if training_args.do_train and not is_prediction: desc = f"Epoch... 
({epoch + 1}/{num_epochs} | Step: {cur_step} | " + desc epochs.write(desc) epochs.desc = desc logger.info(desc) if jax.process_index() == 0: if not os.path.isdir(os.path.join(training_args.output_dir, ckpt_dir)): os.makedirs(os.path.join(training_args.output_dir, ckpt_dir), exist_ok=True) if metrics: # Save metrics (only for the evaluation/prediction being done along with training) if has_tensorboard and training_args.do_train: write_metric( summary_writer, metrics, train_time=None, step=cur_step, metric_key_prefix=metric_key_prefix ) # save final metrics in json metrics = { f"{metric_key_prefix}_{metric_name}": round(value.item(), 6) for metric_name, value in metrics.items() } _path = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_results.json") with open(_path, "w") as f: json.dump(metrics, f, indent=4, sort_keys=True) # Update report with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp: fp.write(desc + "\n") # Save generations if generations: output_file = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_generation.json") with open(output_file, "w", encoding="UTF-8") as fp: json.dump(generations, fp, ensure_ascii=False, indent=4) def evaluate(rng: jax.random.PRNGKey, dataset: Dataset, ckpt_dir: str = ""): evaluation_loop(rng, dataset, metric_key_prefix="eval", ckpt_dir=ckpt_dir) def predict(rng: jax.random.PRNGKey, dataset: Dataset): evaluation_loop(rng, dataset, metric_key_prefix="test", is_prediction=True) input_rng = None if training_args.do_train: cur_step = 0 train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ # Create sampling rng rng, input_rng = jax.random.split(rng) train_metrics = [] train_batches = blockwise_data_loader( input_rng, train_dataset, block_size=training_args.block_size, batch_size=train_batch_size, keep_in_memory=True, shuffle=True, split="train", ) # train for batch_idx, _ in enumerate(tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False)): cur_step += 1 batch = next(train_batches) batch_start = time.time() state, train_metric = p_train_step(state, batch) train_metrics.append(train_metric) train_time += time.time() - batch_start time_per_step = train_time / cur_step # log and save info if training_args.logging_steps > 0 and cur_step % training_args.logging_steps == 0: _train_metric = unreplicate(train_metric) desc = ( f"Epoch... 
({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} |" f" Learning Rate: {_train_metric['learning_rate']} | Time per step: {time_per_step})" ) epochs.desc = desc epochs.write(desc) logger.info(desc) with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp: fp.write(desc + "\n") # Save metrics if has_tensorboard and jax.process_index() == 0: write_metric( summary_writer, train_metrics, train_time=train_time, step=cur_step, metric_key_prefix="train", ) # ======================== Evaluating (inside an epoch) ============================== if ( training_args.do_eval and (training_args.eval_steps is not None and training_args.eval_steps > 0) and cur_step % training_args.eval_steps == 0 ): ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}" commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}" evaluate(input_rng, eval_dataset, ckpt_dir) save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg) # ======================== Epoch End ============================== # log and save info if training_args.logging_steps <= 0: logger.info(desc) with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp: fp.write(desc + "\n") # Save metrics if has_tensorboard and jax.process_index() == 0: write_metric( summary_writer, train_metrics, train_time=train_time, step=cur_step, metric_key_prefix="train" ) # ======================== Evaluating (after each epoch) ============================== if training_args.do_eval and (training_args.eval_steps is None or training_args.eval_steps <= 0): ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}" commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}" evaluate(input_rng, eval_dataset, ckpt_dir) save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg) # ======================== Evaluating | Predicting ============================== # Create sampling rng if input_rng is None: rng, input_rng = jax.random.split(rng) # run evaluation without training if training_args.do_eval and not training_args.do_train: evaluate(input_rng, eval_dataset) # run prediction after (or without) training if training_args.do_predict: predict(input_rng, predict_dataset) if __name__ == "__main__": main()
transformers/examples/flax/image-captioning/run_image_captioning_flax.py/0
{ "file_path": "transformers/examples/flax/image-captioning/run_image_captioning_flax.py", "repo_id": "transformers", "token_count": 24575 }
272
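A small aside on the loss_fn used in the captioning script above: the sketch below re-derives the label-smoothed cross entropy in plain NumPy to make the normalizing constant explicit. The shapes, the smoothing factor eps, and the toy inputs are illustrative assumptions, not values taken from the script.

# Hedged sketch (NumPy only): label-smoothed cross entropy in the style of the script's loss_fn.
import numpy as np

def smoothed_xent(logits, labels, padding_mask, eps=0.1):
    vocab = logits.shape[-1]
    confidence = 1.0 - eps
    low = eps / (vocab - 1)
    # soft targets: `confidence` on the gold id, `low` mass spread over the rest
    soft = np.full(logits.shape, low)
    np.put_along_axis(soft, labels[..., None], confidence, axis=-1)
    # log-softmax with max-subtraction for numerical stability
    z = logits - logits.max(axis=-1, keepdims=True)
    log_probs = z - np.log(np.exp(z).sum(axis=-1, keepdims=True))
    xent = -(soft * log_probs).sum(axis=-1)
    # subtracting the entropy of the soft targets makes the optimal loss 0
    norm = -(confidence * np.log(confidence) + (vocab - 1) * low * np.log(low + 1e-20))
    xent = (xent - norm) * padding_mask          # drop padded positions, as in the script
    return xent.sum() / padding_mask.sum()

# toy usage with made-up shapes
logits = np.random.randn(2, 4, 10)
labels = np.random.randint(0, 10, size=(2, 4))
mask = np.ones((2, 4))
print(smoothed_xent(logits, labels, mask))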
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Benchmarking the library on inference and training """ from transformers import HfArgumentParser, PyTorchBenchmark, PyTorchBenchmarkArguments def main(): parser = HfArgumentParser(PyTorchBenchmarkArguments) try: benchmark_args = parser.parse_args_into_dataclasses()[0] except ValueError as e: arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead." begin_error_msg = " ".join(str(e).split(" ")[:-1]) full_error_msg = "" depreciated_args = eval(str(e).split(" ")[-1]) wrong_args = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in PyTorchBenchmarkArguments.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:]) else: wrong_args.append(arg) if len(wrong_args) > 0: full_error_msg = full_error_msg + begin_error_msg + str(wrong_args) raise ValueError(full_error_msg) benchmark = PyTorchBenchmark(args=benchmark_args) benchmark.run() if __name__ == "__main__": main()
transformers/examples/legacy/benchmarking/run_benchmark.py/0
{ "file_path": "transformers/examples/legacy/benchmarking/run_benchmark.py", "repo_id": "transformers", "token_count": 699 }
273
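The benchmarking entry point above only wires command-line flags into PyTorchBenchmarkArguments; the same benchmark can also be driven programmatically. The sketch below follows the documented pattern, but the model name, batch sizes and sequence lengths are placeholders, and the benchmark classes are deprecated in recent transformers releases, so treat this as an assumption-laden illustration rather than the script's own usage.

# Hedged sketch: invoking the PyTorch benchmark without the CLI wrapper.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-uncased"],      # placeholder checkpoint
    batch_sizes=[8],
    sequence_lengths=[32, 128],
    inference=True,                    # measure inference only
    training=False,
)
benchmark = PyTorchBenchmark(args=args)
results = benchmark.run()              # prints a summary table and returns the results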
#!/usr/bin/env python # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT model fine-tuning script. Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset: python run_openai_gpt.py \ --model_name openai-community/openai-gpt \ --do_train \ --do_eval \ --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016 - cloze_test_ALL_val.csv" \ --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016 - cloze_test_ALL_test.csv" \ --output_dir ../log \ --train_batch_size 16 \ """ import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) logger = logging.getLogger(__name__) def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels) def load_rocstories_dataset(dataset_path): """Output a list of tuples(story, 1st continuation, 2nd continuation, label)""" with open(dataset_path, encoding="utf_8") as f: f = csv.reader(f) output = [] next(f) # skip the first line for line in tqdm(f): output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1)) return output def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token): """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] """ tensor_datasets = [] for dataset in encoded_datasets: n_batch = len(dataset) input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64) mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64) lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64) mc_labels = np.zeros((n_batch,), dtype=np.int64) for ( i, (story, cont1, cont2, mc_label), ) in enumerate(dataset): with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token] input_ids[i, 0, : len(with_cont1)] = with_cont1 input_ids[i, 1, : len(with_cont2)] = with_cont2 mc_token_ids[i, 0] = len(with_cont1) - 1 
mc_token_ids[i, 1] = len(with_cont2) - 1 lm_labels[i, 0, : len(with_cont1)] = with_cont1 lm_labels[i, 1, : len(with_cont2)] = with_cont2 mc_labels[i] = mc_label all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs)) return tensor_datasets def main(): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, default="openai-community/openai-gpt", help="pretrained model name") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--train_dataset", type=str, default="") parser.add_argument("--eval_dataset", type=str, default="") parser.add_argument("--seed", type=int, default=42) parser.add_argument("--num_train_epochs", type=int, default=3) parser.add_argument("--train_batch_size", type=int, default=8) parser.add_argument("--eval_batch_size", type=int, default=16) parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", type=int, default=1) parser.add_argument( "--max_steps", default=-1, type=int, help=( "If > 0: set total number of training steps to perform. Override num_train_epochs." ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", type=float, default=6.25e-5) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--lr_schedule", type=str, default="warmup_linear") parser.add_argument("--weight_decay", type=float, default=0.01) parser.add_argument("--lm_coef", type=float, default=0.9) parser.add_argument("--n_valid", type=int, default=374) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") args = parser.parse_args() print(args) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() logger.info("device: {}, n_gpu {}".format(device, n_gpu)) if not args.do_train and not args.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset special_tokens = ["_start_", "_delimiter_", "_classify_"] tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name) tokenizer.add_tokens(special_tokens) special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens) model = 
OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name) model.resize_token_embeddings(len(tokenizer)) model.to(device) # Load and encode the datasets def tokenize_and_encode(obj): """Tokenize and encode a nested object""" if isinstance(obj, str): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj)) elif isinstance(obj, int): return obj return [tokenize_and_encode(o) for o in obj] logger.info("Encoding dataset...") train_dataset = load_rocstories_dataset(args.train_dataset) eval_dataset = load_rocstories_dataset(args.eval_dataset) datasets = (train_dataset, eval_dataset) encoded_datasets = tokenize_and_encode(datasets) # Compute the max input length for the Transformer max_length = model.config.n_positions // 2 - 2 input_length = max( len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 for dataset in encoded_datasets for story, cont1, cont2, _ in dataset ) input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids) train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1] train_data = TensorDataset(*train_tensor_dataset) train_sampler = RandomSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size) eval_data = TensorDataset(*eval_tensor_dataset) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size) # Prepare optimizer if args.do_train: if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs param_optimizer = list(model.named_parameters()) no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.do_train: nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0 nb_tr_steps = 0 tqdm_bar = tqdm(train_dataloader, desc="Training") for step, batch in enumerate(tqdm_bar): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels) loss = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() exp_average_loss = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0]) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer model_to_save = model.module if hasattr(model, "module") else model # Only save the model itself # If we save using the predefined names, we can 
load using `from_pretrained` output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) tokenizer.save_vocabulary(args.output_dir) # Load a trained model and vocabulary that you have fine-tuned model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir) tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir) model.to(device) if args.do_eval: model.eval() eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): batch = tuple(t.to(device) for t in batch) input_ids, mc_token_ids, lm_labels, mc_labels = batch with torch.no_grad(): _, mc_loss, _, mc_logits = model( input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels ) mc_logits = mc_logits.detach().cpu().numpy() mc_labels = mc_labels.to("cpu").numpy() tmp_eval_accuracy = accuracy(mc_logits, mc_labels) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0) nb_eval_steps += 1 eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples train_loss = tr_loss / nb_tr_steps if args.do_train else None result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": main()
transformers/examples/legacy/run_openai_gpt.py/0
{ "file_path": "transformers/examples/legacy/run_openai_gpt.py", "repo_id": "transformers", "token_count": 6175 }
274
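To make the pre_process_datasets packing above easier to picture, here is a toy, self-contained version for a single RocStories example. The special-token ids and the story/continuation tokens are made up; only the layout ([start] story [delim] continuation [clf]) and the mc_token_ids bookkeeping mirror the script.

import numpy as np

start, delim, clf = 1, 2, 3                      # hypothetical special-token ids
story, cont1, cont2 = [10, 11, 12], [20, 21], [30]

with_cont1 = [start] + story + [delim] + cont1 + [clf]
with_cont2 = [start] + story + [delim] + cont2 + [clf]

input_len = max(len(with_cont1), len(with_cont2))
input_ids = np.zeros((1, 2, input_len), dtype=np.int64)   # (n_batch, n_alternatives, length)
input_ids[0, 0, : len(with_cont1)] = with_cont1
input_ids[0, 1, : len(with_cont2)] = with_cont2

# the multiple-choice head reads the hidden state at the _classify_ token
mc_token_ids = np.array([[len(with_cont1) - 1, len(with_cont2) - 1]])

print(input_ids)
print(mc_token_ids)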
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for named entity recognition on CoNLL-2003. """ import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) task_type: Optional[str] = field( default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ data_dir: str = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) labels: Optional[str] = field( default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." ) module = import_module("tasks") try: token_classification_task_clazz = getattr(module, model_args.task_type) token_classification_task: TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. " f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Prepare CONLL-2003 task labels = token_classification_task.get_labels(data_args.labels) label_map: Dict[int, str] = dict(enumerate(labels)) num_labels = len(labels) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets train_dataset = ( TokenClassificationDataset( token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) eval_dataset = ( TokenClassificationDataset( token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]: preds = np.argmax(predictions, axis=2) batch_size, seq_len = preds.shape out_label_list = [[] for _ in range(batch_size)] preds_list = [[] for _ in range(batch_size)] for i in range(batch_size): for j in range(seq_len): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) return preds_list, out_label_list def compute_metrics(p: EvalPrediction) -> Dict: preds_list, out_label_list = align_predictions(p.predictions, p.label_ids) return { "accuracy_score": accuracy_score(out_label_list, preds_list), "precision": precision_score(out_label_list, preds_list), "recall": recall_score(out_label_list, preds_list), "f1": f1_score(out_label_list, preds_list), } # Data collator data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") result = trainer.evaluate() output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in result.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) results.update(result) # Predict if training_args.do_predict: test_dataset = TokenClassificationDataset( token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, 
labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, ) predictions, label_ids, metrics = trainer.predict(test_dataset) preds_list, _ = align_predictions(predictions, label_ids) output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt") if trainer.is_world_process_zero(): with open(output_test_results_file, "w") as writer: for key, value in metrics.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f: token_classification_task.write_predictions_to_file(writer, f, preds_list) return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/legacy/token-classification/run_ner.py/0
{ "file_path": "transformers/examples/legacy/token-classification/run_ner.py", "repo_id": "transformers", "token_count": 5023 }
275
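The align_predictions helper in the token-classification script above is where sub-word and special positions (marked with the loss ignore index) are dropped before seqeval scoring. Below is a minimal stand-alone rendering of that logic; the three-label map and the toy arrays are assumptions for illustration, not CoNLL-2003 data.

import numpy as np

label_map = {0: "O", 1: "B-PER", 2: "I-PER"}     # toy label set
ignore_index = -100                              # what the script reads from nn.CrossEntropyLoss()

predictions = np.random.randn(1, 4, 3)           # (batch, seq_len, num_labels) logits
label_ids = np.array([[0, 1, -100, 2]])          # -100 marks positions to skip

preds = np.argmax(predictions, axis=2)
preds_list, out_label_list = [[]], [[]]
for j in range(label_ids.shape[1]):
    if label_ids[0, j] != ignore_index:
        out_label_list[0].append(label_map[int(label_ids[0, j])])
        preds_list[0].append(label_map[int(preds[0, j])])

print(preds_list, out_label_list)                # ready for seqeval's precision/recall/f1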
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import logging import os import sys import warnings from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version """ Fine-tuning a 🤗 Transformers model for image classification""" logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.39.0.dev0") require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def pil_loader(path: str): with open(path, "rb") as f: im = Image.open(f) return im.convert("RGB") @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default=None, metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." }, ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."}) validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."}) train_val_split: Optional[float] = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) image_column_name: str = field( default="image", metadata={"help": "The name of the dataset column containing the image data. 
Defaults to 'image'."}, ) label_column_name: str = field( default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'."}, ) def __post_init__(self): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="google/vit-base-patch16-224-in21k", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) use_auth_token: bool = field( default=None, metadata={ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead." }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." ) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if model_args.use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead.", FutureWarning, ) if model_args.token is not None: raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.") model_args.token = model_args.use_auth_token # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_image_classification", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, ) else: data_files = {} if data_args.train_dir is not None: data_files["train"] = os.path.join(data_args.train_dir, "**") if data_args.validation_dir is not None: data_files["validation"] = os.path.join(data_args.validation_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, ) dataset_column_names = dataset["train"].column_names if "train" in dataset else dataset["validation"].column_names if data_args.image_column_name not in dataset_column_names: raise ValueError( f"--image_column_name {data_args.image_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--image_column_name` to the correct audio column - one of " f"{', '.join(dataset_column_names)}." ) if data_args.label_column_name not in dataset_column_names: raise ValueError( f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--label_column_name` to the correct text column - one of " f"{', '.join(dataset_column_names)}." ) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example[data_args.label_column_name] for example in examples]) return {"pixel_values": pixel_values, "labels": labels} # If we don't have a validation split, split off a percentage of train as validation. 
data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. labels = dataset["train"].features[data_args.label_column_name].names label2id, id2label = {}, {} for i, label in enumerate(labels): label2id[label] = str(i) id2label[str(i)] = label # Load the accuracy metric from the datasets package metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p): """Computes accuracy on a batch of predictions""" return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids) config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) model = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: size = image_processor.size["shortest_edge"] else: size = (image_processor.size["height"], image_processor.size["width"]) normalize = ( Normalize(mean=image_processor.image_mean, std=image_processor.image_std) if hasattr(image_processor, "image_mean") and hasattr(image_processor, "image_std") else Lambda(lambda x: x) ) _train_transforms = Compose( [ RandomResizedCrop(size), RandomHorizontalFlip(), ToTensor(), normalize, ] ) _val_transforms = Compose( [ Resize(size), CenterCrop(size), ToTensor(), normalize, ] ) def train_transforms(example_batch): """Apply _train_transforms across a batch.""" example_batch["pixel_values"] = [ _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch[data_args.image_column_name] ] return example_batch def val_transforms(example_batch): """Apply _val_transforms across a batch.""" example_batch["pixel_values"] = [ _val_transforms(pil_img.convert("RGB")) for pil_img in example_batch[data_args.image_column_name] ] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: dataset["train"] = ( dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the training transforms dataset["train"].set_transform(train_transforms) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: dataset["validation"] = ( dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms dataset["validation"].set_transform(val_transforms) # Initialize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "tasks": "image-classification", "dataset": data_args.dataset_name, "tags": ["image-classification", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
transformers/examples/pytorch/image-classification/run_image_classification.py/0
{ "file_path": "transformers/examples/pytorch/image-classification/run_image_classification.py", "repo_id": "transformers", "token_count": 7153 }
276
<!--- Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Multiple Choice

## Fine-tuning on SWAG with the Trainer

`run_swag` allows you to fine-tune any model from our [hub](https://huggingface.co/models) (as long as its architecture has a `ForMultipleChoice` version in the library) on the SWAG dataset or your own csv/jsonlines files, as long as they are structured the same way. To make it work on another dataset, you will need to tweak the `preprocess_function` inside the script.

```bash
python examples/multiple-choice/run_swag.py \
--model_name_or_path FacebookAI/roberta-base \
--do_train \
--do_eval \
--learning_rate 5e-5 \
--num_train_epochs 3 \
--output_dir /tmp/swag_base \
--per_device_eval_batch_size=16 \
--per_device_train_batch_size=16 \
--overwrite_output
```

Training with the defined hyper-parameters yields the following results:

```
***** Eval results *****
eval_acc = 0.8338998300509847
eval_loss = 0.44457291918821606
```

## With Accelerate

Based on the script [run_swag_no_trainer.py](https://github.com/huggingface/transformers/blob/main/examples/pytorch/multiple-choice/run_swag_no_trainer.py).

Like `run_swag.py`, this script allows you to fine-tune any of the models on the [hub](https://huggingface.co/models) (as long as its architecture has a `ForMultipleChoice` version in the library) on the SWAG dataset or your own data in a csv or a JSON file. The main difference is that this script exposes the bare training loop, to allow you to quickly experiment and add any customization you would like.

It offers fewer options than the script with `Trainer` (but you can easily change the options for the optimizer or the dataloaders directly in the script), but it still runs in a distributed setup, on TPU, and supports mixed precision by means of the [🤗 `Accelerate`](https://github.com/huggingface/accelerate) library. You can use the script normally after installing it:

```bash
pip install git+https://github.com/huggingface/accelerate
```

then

```bash
export DATASET_NAME=swag

python run_swag_no_trainer.py \
--model_name_or_path google-bert/bert-base-cased \
--dataset_name $DATASET_NAME \
--max_seq_length 128 \
--per_device_train_batch_size 32 \
--learning_rate 2e-5 \
--num_train_epochs 3 \
--output_dir /tmp/$DATASET_NAME/
```

You can then use your usual launchers to run it in a distributed environment, but the easiest way is to run

```bash
accelerate config
```

and reply to the questions asked. Then

```bash
accelerate test
```

which will check that everything is ready for training.
Finally, you can launch training with

```bash
export DATASET_NAME=swag

accelerate launch run_swag_no_trainer.py \
--model_name_or_path google-bert/bert-base-cased \
--dataset_name $DATASET_NAME \
--max_seq_length 128 \
--per_device_train_batch_size 32 \
--learning_rate 2e-5 \
--num_train_epochs 3 \
--output_dir /tmp/$DATASET_NAME/
```

This command is the same and will work for:

- a CPU-only setup
- a setup with one GPU
- distributed training with several GPUs (single or multi node)
- training on TPUs

Note that this library is still in alpha, so your feedback is more than welcome if you encounter any problems using it.
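For a quick sanity check after training, you could load the saved checkpoint and score a toy example along the following lines. This snippet is only an illustration (it is not part of the example scripts), and the checkpoint path is a placeholder for whatever `--output_dir` you used above:

```python
import torch
from transformers import AutoModelForMultipleChoice, AutoTokenizer

checkpoint = "/tmp/swag_base"  # placeholder: the --output_dir used during training
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForMultipleChoice.from_pretrained(checkpoint)

context = "She opened the fridge because"
candidates = ["she was hungry.", "the car would not start."]

# Pair the same context with every candidate ending; the model scores each pair.
encoded = tokenizer([context] * len(candidates), candidates, padding=True, return_tensors="pt")
# Multiple-choice models expect tensors of shape (batch_size, num_choices, seq_len).
inputs = {name: tensor.unsqueeze(0) for name, tensor in encoded.items()}

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, num_choices)

print("Predicted ending:", candidates[logits.argmax(-1).item()])
```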
transformers/examples/pytorch/multiple-choice/README.md/0
{ "file_path": "transformers/examples/pytorch/multiple-choice/README.md", "repo_id": "transformers", "token_count": 1170 }
277
<!--- Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Semantic segmentation examples

This directory contains 2 scripts that showcase how to fine-tune any model supported by the [`AutoModelForSemanticSegmentation` API](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModelForSemanticSegmentation) (such as [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer), [BEiT](https://huggingface.co/docs/transformers/main/en/model_doc/beit), [DPT](https://huggingface.co/docs/transformers/main/en/model_doc/dpt)) using PyTorch.

![segformer_inference_widget](https://user-images.githubusercontent.com/48327001/163667406-01f323a6-72ec-4e7e-bdeb-7d9da71b0697.gif)

Content:
* [Note on custom data](#note-on-custom-data)
* [PyTorch version, Trainer](#pytorch-version-trainer)
* [PyTorch version, no Trainer](#pytorch-version-no-trainer)
* [Reload and perform inference](#reload-and-perform-inference)
* [Important notes](#important-notes)

## Note on custom data

In case you'd like to use the script with custom data, two things are required: 1) creating a `DatasetDict` and 2) creating an id2label mapping. Below, these are explained in more detail.

### Creating a `DatasetDict`

The script assumes that you have a `DatasetDict` with 2 columns, "image" and "label", both of type [Image](https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Image). This can be created as follows:

```python
from datasets import Dataset, DatasetDict, Image

# your images can of course have a different extension
# semantic segmentation maps are typically stored in the png format
image_paths_train = ["path/to/image_1.jpg", "path/to/image_2.jpg", ..., "path/to/image_n.jpg"]
label_paths_train = ["path/to/annotation_1.png", "path/to/annotation_2.png", ..., "path/to/annotation_n.png"]

# same for validation
# image_paths_validation = [...]
# label_paths_validation = [...]

def create_dataset(image_paths, label_paths):
    dataset = Dataset.from_dict({"image": sorted(image_paths),
                                 "label": sorted(label_paths)})
    dataset = dataset.cast_column("image", Image())
    dataset = dataset.cast_column("label", Image())

    return dataset

# step 1: create Dataset objects
train_dataset = create_dataset(image_paths_train, label_paths_train)
validation_dataset = create_dataset(image_paths_validation, label_paths_validation)

# step 2: create DatasetDict
dataset = DatasetDict({
    "train": train_dataset,
    "validation": validation_dataset,
})

# step 3: push to hub (assumes you have run the huggingface-cli login command in a terminal/notebook)
dataset.push_to_hub("name of repo on the hub")

# optionally, you can push to a private repo on the hub
# dataset.push_to_hub("name of repo on the hub", private=True)
```

An example of such a dataset can be seen at [nielsr/ade20k-demo](https://huggingface.co/datasets/nielsr/ade20k-demo).
### Creating an id2label mapping

Besides that, the script also assumes the existence of an `id2label.json` file in the repo, containing a mapping from integers to actual class names. An example of that can be seen [here](https://huggingface.co/datasets/nielsr/ade20k-demo/blob/main/id2label.json). This can be created in Python as follows:

```python
import json
# simple example
id2label = {0: 'cat', 1: 'dog'}
with open('id2label.json', 'w') as fp:
    json.dump(id2label, fp)
```

You can easily upload this by clicking on "Add file" in the "Files and versions" tab of your repo on the hub.

## PyTorch version, Trainer

Based on the script [`run_semantic_segmentation.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py).

The script leverages the [🤗 Trainer API](https://huggingface.co/docs/transformers/main_classes/trainer) to automatically take care of the training for you, running on distributed environments right away.

Here we show how to fine-tune a [SegFormer](https://huggingface.co/nvidia/mit-b0) model on the [segments/sidewalk-semantic](https://huggingface.co/datasets/segments/sidewalk-semantic) dataset:

```bash
python run_semantic_segmentation.py \
    --model_name_or_path nvidia/mit-b0 \
    --dataset_name segments/sidewalk-semantic \
    --output_dir ./segformer_outputs/ \
    --remove_unused_columns False \
    --do_train \
    --do_eval \
    --push_to_hub \
    --push_to_hub_model_id segformer-finetuned-sidewalk-10k-steps \
    --max_steps 10000 \
    --learning_rate 0.00006 \
    --lr_scheduler_type polynomial \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --logging_strategy steps \
    --logging_steps 100 \
    --evaluation_strategy epoch \
    --save_strategy epoch \
    --seed 1337
```

The resulting model can be seen here: https://huggingface.co/nielsr/segformer-finetuned-sidewalk-10k-steps. The corresponding Weights and Biases report can be found [here](https://wandb.ai/nielsrogge/huggingface/reports/SegFormer-fine-tuning--VmlldzoxODY5NTQ2).

Note that it's always advised to check the original paper to know the details regarding training hyperparameters. E.g. from the SegFormer paper:

> We trained the models using AdamW optimizer for 160K iterations on ADE20K, Cityscapes, and 80K iterations on COCO-Stuff. (...) We used a batch size of 16 for ADE20K and COCO-Stuff, and a batch size of 8 for Cityscapes. The learning rate was set to an initial value of 0.00006 and then used a “poly” LR schedule with factor 1.0 by default.

Note that you can replace the model and dataset by simply setting the `model_name_or_path` and `dataset_name` arguments respectively, with any model or dataset from the [hub](https://huggingface.co/).

For an overview of all possible arguments, we refer to the [docs](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) of the `TrainingArguments`, which can be passed as flags.

## PyTorch version, no Trainer

Based on the script [`run_semantic_segmentation_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py).

The script leverages [🤗 `Accelerate`](https://github.com/huggingface/accelerate), which allows you to write your own training loop in PyTorch, but have it run instantly on any (distributed) environment, including CPU, multi-CPU, GPU, multi-GPU and TPU. It also supports mixed precision.
First, run:

```bash
accelerate config
```

and reply to the questions asked regarding the environment on which you'd like to train. Then

```bash
accelerate test
```

which will check that everything is ready for training. Finally, you can launch training with

```bash
accelerate launch run_semantic_segmentation_no_trainer.py --output_dir segformer-finetuned-sidewalk --with_tracking --push_to_hub
```

and boom, you're training, possibly on multiple GPUs, logging everything to all trackers found in your environment (like Weights and Biases, Tensorboard) and regularly pushing your model to the hub (with the repo name being equal to `args.output_dir` under your HF username) 🤗

With the default settings, the script fine-tunes a [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer) model on the [segments/sidewalk-semantic](https://huggingface.co/datasets/segments/sidewalk-semantic) dataset. The resulting model can be seen here: https://huggingface.co/nielsr/segformer-finetuned-sidewalk. Note that the script usually requires quite a few epochs to achieve great results, e.g. the SegFormer authors fine-tuned their model for 160k steps (batches) on [`scene_parse_150`](https://huggingface.co/datasets/scene_parse_150).

## Reload and perform inference

This means that after training, you can easily load your trained model as follows:

```python
from transformers import AutoImageProcessor, AutoModelForSemanticSegmentation

model_name = "name_of_repo_on_the_hub_or_path_to_local_folder"

image_processor = AutoImageProcessor.from_pretrained(model_name)
model = AutoModelForSemanticSegmentation.from_pretrained(model_name)
```

and perform inference as follows:

```python
from PIL import Image
import requests
import torch
from torch import nn

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# prepare image for the model
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
    logits = outputs.logits

# rescale logits to original image size
logits = nn.functional.interpolate(outputs.logits.detach().cpu(),
                                   size=image.size[::-1],  # (height, width)
                                   mode='bilinear',
                                   align_corners=False)

predicted = logits.argmax(1)
```

For visualization of the segmentation maps, we refer to the [example notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/SegFormer/Segformer_inference_notebook.ipynb).

## Important notes

Some datasets, like [`scene_parse_150`](https://huggingface.co/datasets/scene_parse_150), contain a "background" label that is not part of the classes. The Scene Parse 150 dataset for instance contains labels between 0 and 150, with 0 being the background class, and 1 to 150 being actual class names (like "tree", "person", etc.). For these kinds of datasets, one replaces the background label (0) by 255, which is the `ignore_index` of the PyTorch model's loss function, and reduces all labels by 1. This way, the `labels` are PyTorch tensors containing values between 0 and 149, and 255 for all background/padding.

In case you're training on such a dataset, make sure to set the `reduce_labels` flag, which will take care of this.
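As an illustration only (this sketch mirrors the remapping described above, it is not the exact code used by the example scripts), the label reduction could be written as:

```python
import numpy as np

def reduce_labels(segmentation_map: np.ndarray) -> np.ndarray:
    """Map the background class (0) to the ignore index (255) and shift real classes down by 1."""
    label = segmentation_map.astype(np.int64)
    label[label == 0] = 255    # background pixels become the ignored index
    label = label - 1          # classes 1..150 become 0..149
    label[label == 254] = 255  # keep the ignored pixels at 255 after the shift
    return label
```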
transformers/examples/pytorch/semantic-segmentation/README.md/0
{ "file_path": "transformers/examples/pytorch/semantic-segmentation/README.md", "repo_id": "transformers", "token_count": 3408 }
278
# coding=utf-8 # Copyright 2018 HuggingFace Inc.. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock from accelerate.utils import write_basic_config from transformers.testing_utils import ( TestCasePlus, backend_device_count, run_command, slow, torch_device, ) logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument("-f") args = parser.parse_args() return args.f def get_results(output_dir): results = {} path = os.path.join(output_dir, "all_results.json") if os.path.exists(path): with open(path, "r") as f: results = json.load(f) else: raise ValueError(f"can't find {path}") return results stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class ExamplesTestsNoTrainer(TestCasePlus): @classmethod def setUpClass(cls): # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU cls.tmpdir = tempfile.mkdtemp() cls.configPath = os.path.join(cls.tmpdir, "default_config.yml") write_basic_config(save_location=cls.configPath) cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdir) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_glue_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --num_warmup_steps=2 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer"))) @unittest.skip("Zach is working on this.") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_clm_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilbert/distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if backend_device_count(torch_device) > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertLess(result["perplexity"], 100) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer"))) @unittest.skip("Zach is working on this.") @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_mlm_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilbert/distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertLess(result["perplexity"], 42) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer"))) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_ner_no_trainer(self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu epochs = 7 if backend_device_count(torch_device) > 1 else 2 tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) self.assertLess(result["train_loss"], 0.6) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer"))) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_squad_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path google-bert/bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"], 28) self.assertGreaterEqual(result["eval_exact"], 28) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer"))) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_swag_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.8) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer"))) @slow @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_summarization_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path google-t5/t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_rouge1"], 10) self.assertGreaterEqual(result["eval_rouge2"], 2) self.assertGreaterEqual(result["eval_rougeL"], 7) self.assertGreaterEqual(result["eval_rougeLsum"], 7) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer"))) @slow @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_translation_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_bleu"], 30) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer"))) @slow def test_run_semantic_segmentation_no_trainer(self): stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 
--per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10) @mock.patch.dict(os.environ, {"WANDB_MODE": "offline", "DVCLIVE_TEST": "true"}) def test_run_image_classification_no_trainer(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 --label_column_name labels """.split() run_command(self._launch_args + testargs) result = get_results(tmp_dir) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"], 0.4) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1"))) self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
transformers/examples/pytorch/test_accelerate_examples.py/0
{ "file_path": "transformers/examples/pytorch/test_accelerate_examples.py", "repo_id": "transformers", "token_count": 6386 }
279
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning a 🤗 Transformers model on token classification tasks (NER, POS, CHUNKS) relying on the accelerate library without using a Trainer. """ import argparse import json import logging import math import os import random from pathlib import Path import datasets import evaluate import numpy as np import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import ClassLabel, load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, PretrainedConfig, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.39.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") # You should update this to your particular problem to have better documentation of `model_type` MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser( description="Finetune a transformers model on a text classification task (NER) with accelerate library" ) parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--text_column_name", type=str, default=None, help="The column name of text to input in the file (a csv or JSON file).", ) parser.add_argument( "--label_column_name", type=str, default=None, help="The column name of label to input in the file (a csv or JSON file).", ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--label_all_tokens", action="store_true", help="Setting labels of all special tokens to -100 and thus PyTorch will ignore them.", ) parser.add_argument( "--return_entity_level_metrics", action="store_true", help="Indication whether entity level metrics are to be returner.", ) parser.add_argument( "--task_name", type=str, default="ner", choices=["ner", "pos", "chunk"], help="The name of the task.", ) parser.add_argument( "--debug", action="store_true", help="Activate debug mode and run training only with a subset of data.", ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", type=bool, default=False, help=( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option " "should only be set to `True` for repositories you trust and in which you have read the code, as it will " "execute code present on the Hub on your local machine." 
), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--ignore_mismatched_sizes", action="store_true", help="Whether or not to enable to load a pretrained model whose head dimensions are different.", ) args = parser.parse_args() # Sanity checks if args.task_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_ner_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator = ( Accelerator(log_with=args.report_to, project_dir=args.output_dir) if args.with_tracking else Accelerator() ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called # 'tokens' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {} if args.train_file is not None: data_files["train"] = args.train_file extension = args.train_file.split(".")[-1] if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.validation_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # Trim a number of training examples if args.debug: for split in raw_datasets.keys(): raw_datasets[split] = raw_datasets[split].select(range(100)) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. if raw_datasets["train"] is not None: column_names = raw_datasets["train"].column_names features = raw_datasets["train"].features else: column_names = raw_datasets["validation"].column_names features = raw_datasets["validation"].features if args.text_column_name is not None: text_column_name = args.text_column_name elif "tokens" in column_names: text_column_name = "tokens" else: text_column_name = column_names[0] if args.label_column_name is not None: label_column_name = args.label_column_name elif f"{args.task_name}_tags" in column_names: label_column_name = f"{args.task_name}_tags" else: label_column_name = column_names[1] # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. # Otherwise, we have to get the list of labels manually. 
labels_are_int = isinstance(features[label_column_name].feature, ClassLabel) if labels_are_int: label_list = features[label_column_name].feature.names label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(raw_datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if args.config_name: config = AutoConfig.from_pretrained( args.config_name, num_labels=num_labels, trust_remote_code=args.trust_remote_code ) elif args.model_name_or_path: config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=num_labels, trust_remote_code=args.trust_remote_code ) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") tokenizer_name_or_path = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path if not tokenizer_name_or_path: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if config.model_type in {"bloom", "gpt2", "roberta"}: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, use_fast=True, add_prefix_space=True, trust_remote_code=args.trust_remote_code ) else: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, use_fast=True, trust_remote_code=args.trust_remote_code ) if args.model_name_or_path: model = AutoModelForTokenClassification.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ignore_mismatched_sizes=args.ignore_mismatched_sizes, trust_remote_code=args.trust_remote_code, ) else: logger.info("Training new model from scratch") model = AutoModelForTokenClassification.from_config(config, trust_remote_code=args.trust_remote_code) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Model has labels -> use them. if model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id: if sorted(model.config.label2id.keys()) == sorted(label_list): # Reorganize `label_list` to match the ordering of the model. 
if labels_are_int: label_to_id = {i: int(model.config.label2id[l]) for i, l in enumerate(label_list)} label_list = [model.config.id2label[i] for i in range(num_labels)] else: label_list = [model.config.id2label[i] for i in range(num_labels)] label_to_id = {l: i for i, l in enumerate(label_list)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(model.config.label2id.keys())}, dataset labels:" f" {sorted(label_list)}.\nIgnoring the model labels as a result.", ) # Set the correspondences label/ID inside the model config model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = dict(enumerate(label_list)) # Map that sends B-Xxx label to its I-Xxx counterpart b_to_i_label = [] for idx, label in enumerate(label_list): if label.startswith("B-") and label.replace("B-", "I-") in label_list: b_to_i_label.append(label_list.index(label.replace("B-", "I-"))) else: b_to_i_label.append(idx) # Preprocessing the datasets. # First we tokenize all the texts. padding = "max_length" if args.pad_to_max_length else False # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], max_length=args.max_length, padding=padding, truncation=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). is_split_into_words=True, ) labels = [] for i, label in enumerate(examples[label_column_name]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: if args.label_all_tokens: label_ids.append(b_to_i_label[label_to_id[label[word_idx]]]) else: label_ids.append(-100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs with accelerator.main_process_first(): processed_raw_datasets = raw_datasets.map( tokenize_and_align_labels, batched=True, remove_columns=raw_datasets["train"].column_names, desc="Running tokenizer on dataset", ) train_dataset = processed_raw_datasets["train"] eval_dataset = processed_raw_datasets["validation"] # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorForTokenClassification` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
data_collator = DataCollatorForTokenClassification( tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None) ) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Use the device given by the `accelerator` object. device = accelerator.device model.to(device) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("ner_no_trainer", experiment_config) # Metrics metric = evaluate.load("seqeval") def get_labels(predictions, references): # Transform predictions and references tensos to numpy arrays if device.type == "cpu": y_pred = predictions.detach().clone().numpy() y_true = references.detach().clone().numpy() else: y_pred = predictions.detach().cpu().clone().numpy() y_true = references.detach().cpu().clone().numpy() # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(pred, gold_label) if l != -100] for pred, gold_label in zip(y_pred, y_true) ] true_labels = [ [label_list[l] for (p, l) in zip(pred, gold_label) if l != -100] for pred, gold_label in zip(y_pred, y_true) ] return true_predictions, true_labels def compute_metrics(): results = metric.compute() if args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) labels = batch["labels"] if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=-100) labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100) predictions_gathered, labels_gathered = accelerator.gather((predictions, labels)) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == len(eval_dataloader) - 1: predictions_gathered = predictions_gathered[: len(eval_dataloader.dataset) - samples_seen] labels_gathered = labels_gathered[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += labels_gathered.shape[0] 
preds, refs = get_labels(predictions_gathered, labels_gathered) metric.add_batch( predictions=preds, references=refs, ) # predictions and preferences are expected to be a nested list of labels, not label_ids eval_metric = compute_metrics() accelerator.print(f"epoch {epoch}:", eval_metric) if args.with_tracking: accelerator.log( { "seqeval": eval_metric, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) all_results = {f"eval_{k}": v for k, v in eval_metric.items()} if args.with_tracking: all_results.update({"train_loss": total_loss.item() / len(train_dataloader)}) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: # Convert all float64 & int64 type numbers to float & int for json serialization for key, value in all_results.items(): if isinstance(value, np.float64): all_results[key] = float(value) elif isinstance(value, np.int64): all_results[key] = int(value) json.dump(all_results, f) if __name__ == "__main__": main()
transformers/examples/pytorch/token-classification/run_ner_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/token-classification/run_ner_no_trainer.py", "repo_id": "transformers", "token_count": 14931 }
280
# Examples

In this folder we showcase some examples of using code models for downstream tasks.

## Complexity prediction

In this task we want to predict the complexity of Java programs from the [CodeComplex](https://huggingface.co/datasets/codeparrot/codecomplex) dataset. Using the Hugging Face `Trainer`, we fine-tuned [multilingual CodeParrot](https://huggingface.co/codeparrot/codeparrot-small-multi) and [UniXcoder](https://huggingface.co/microsoft/unixcoder-base-nine) on it, and we used the latter to build this Java complexity prediction [space](https://huggingface.co/spaces/codeparrot/code-complexity-predictor) on the Hugging Face Hub.

To fine-tune a model on this dataset you can use the following command:

```bash
python train_complexity_predictor.py \
    --model_ckpt microsoft/unixcoder-base-nine \
    --num_epochs 60 \
    --num_warmup_steps 10 \
    --batch_size 8 \
    --learning_rate 5e-4
```

## Code generation: text to python

In this task we want to train a model to generate code from English text. We fine-tuned CodeParrot-small on [github-jupyter-text-to-code](https://huggingface.co/datasets/codeparrot/github-jupyter-text-to-code), a dataset where the samples are a succession of docstrings and their Python code, originally extracted from Jupyter notebooks parsed in this [dataset](https://huggingface.co/datasets/codeparrot/github-jupyter-parsed).

To fine-tune a model on this dataset we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as the pretraining of CodeParrot:

```bash
accelerate launch scripts/codeparrot_training.py \
    --model_ckpt codeparrot/codeparrot-small \
    --dataset_name_train codeparrot/github-jupyter-text-to-code \
    --dataset_name_valid codeparrot/github-jupyter-text-to-code \
    --train_batch_size 12 \
    --valid_batch_size 12 \
    --learning_rate 5e-4 \
    --num_warmup_steps 100 \
    --gradient_accumulation 1 \
    --gradient_checkpointing False \
    --max_train_steps 3000 \
    --save_checkpoint_steps 200 \
    --save_dir jupyter-text-to-python
```

## Code explanation: python to text

In this task we want to train a model to explain Python code. We fine-tuned CodeParrot-small on [github-jupyter-code-to-text](https://huggingface.co/datasets/codeparrot/github-jupyter-code-to-text), a dataset where the samples are a succession of Python code and its explanation as a docstring. We simply inverted the order of the text and code pairs in the github-jupyter-code-to-text dataset and added the delimiters "Explanation:" and "End of explanation" inside the docstrings.

To fine-tune a model on this dataset we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as the pretraining of CodeParrot:

```bash
accelerate launch scripts/codeparrot_training.py \
    --model_ckpt codeparrot/codeparrot-small \
    --dataset_name_train codeparrot/github-jupyter-code-to-text \
    --dataset_name_valid codeparrot/github-jupyter-code-to-text \
    --train_batch_size 12 \
    --valid_batch_size 12 \
    --learning_rate 5e-4 \
    --num_warmup_steps 100 \
    --gradient_accumulation 1 \
    --gradient_checkpointing False \
    --max_train_steps 3000 \
    --save_checkpoint_steps 200 \
    --save_dir jupyter-python-to-text
```
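As a quick sanity check after text-to-python fine-tuning, you could load the resulting checkpoint and generate code from a docstring-style prompt. The snippet below is only a minimal sketch, not part of the training scripts: `jupyter-text-to-python` stands in for the `--save_dir` used above, and it assumes both the model and its tokenizer were saved at that path.

```python
# Minimal sketch: generate Python code from an English docstring-style prompt.
# "jupyter-text-to-python" is the hypothetical --save_dir from the command above.
from transformers import pipeline

generator = pipeline("text-generation", model="jupyter-text-to-python")

prompt = '"""Load the iris dataset with pandas and print the first five rows."""\n'
output = generator(prompt, max_new_tokens=64, do_sample=True, temperature=0.2)
print(output[0]["generated_text"])
```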
transformers/examples/research_projects/codeparrot/examples/README.md/0
{ "file_path": "transformers/examples/research_projects/codeparrot/examples/README.md", "repo_id": "transformers", "token_count": 1170 }
281
import gym import numpy as np import torch from mujoco_py import GlfwContext from transformers import DecisionTransformerModel GlfwContext(offscreen=True) # Create a window to init GLFW. def get_action(model, states, actions, rewards, returns_to_go, timesteps): # we don't care about the past rewards in this model states = states.reshape(1, -1, model.config.state_dim) actions = actions.reshape(1, -1, model.config.act_dim) returns_to_go = returns_to_go.reshape(1, -1, 1) timesteps = timesteps.reshape(1, -1) if model.config.max_length is not None: states = states[:, -model.config.max_length :] actions = actions[:, -model.config.max_length :] returns_to_go = returns_to_go[:, -model.config.max_length :] timesteps = timesteps[:, -model.config.max_length :] # pad all tokens to sequence length attention_mask = torch.cat( [torch.zeros(model.config.max_length - states.shape[1]), torch.ones(states.shape[1])] ) attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1) states = torch.cat( [ torch.zeros( (states.shape[0], model.config.max_length - states.shape[1], model.config.state_dim), device=states.device, ), states, ], dim=1, ).to(dtype=torch.float32) actions = torch.cat( [ torch.zeros( (actions.shape[0], model.config.max_length - actions.shape[1], model.config.act_dim), device=actions.device, ), actions, ], dim=1, ).to(dtype=torch.float32) returns_to_go = torch.cat( [ torch.zeros( (returns_to_go.shape[0], model.config.max_length - returns_to_go.shape[1], 1), device=returns_to_go.device, ), returns_to_go, ], dim=1, ).to(dtype=torch.float32) timesteps = torch.cat( [ torch.zeros( (timesteps.shape[0], model.config.max_length - timesteps.shape[1]), device=timesteps.device ), timesteps, ], dim=1, ).to(dtype=torch.long) else: attention_mask = None _, action_preds, _ = model( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) return action_preds[0, -1] # build the environment env = gym.make("Hopper-v3") state_dim = env.observation_space.shape[0] act_dim = env.action_space.shape[0] max_ep_len = 1000 device = "cuda" scale = 1000.0 # normalization for rewards/returns TARGET_RETURN = 3600 / scale # evaluation conditioning targets, 3600 is reasonable from the paper LINK state_mean = np.array( [ 1.311279, -0.08469521, -0.5382719, -0.07201576, 0.04932366, 2.1066856, -0.15017354, 0.00878345, -0.2848186, -0.18540096, -0.28461286, ] ) state_std = np.array( [ 0.17790751, 0.05444621, 0.21297139, 0.14530419, 0.6124444, 0.85174465, 1.4515252, 0.6751696, 1.536239, 1.6160746, 5.6072536, ] ) state_mean = torch.from_numpy(state_mean).to(device=device) state_std = torch.from_numpy(state_std).to(device=device) # Create the decision transformer model model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium") model = model.to(device) model.eval() for ep in range(10): episode_return, episode_length = 0, 0 state = env.reset() target_return = torch.tensor(TARGET_RETURN, device=device, dtype=torch.float32).reshape(1, 1) states = torch.from_numpy(state).reshape(1, state_dim).to(device=device, dtype=torch.float32) actions = torch.zeros((0, act_dim), device=device, dtype=torch.float32) rewards = torch.zeros(0, device=device, dtype=torch.float32) timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1) for t in range(max_ep_len): env.render() # add padding actions = torch.cat([actions, torch.zeros((1, act_dim), device=device)], dim=0) rewards = 
torch.cat([rewards, torch.zeros(1, device=device)]) action = get_action( model, (states.to(dtype=torch.float32) - state_mean) / state_std, actions.to(dtype=torch.float32), rewards.to(dtype=torch.float32), target_return.to(dtype=torch.float32), timesteps.to(dtype=torch.long), ) actions[-1] = action action = action.detach().cpu().numpy() state, reward, done, _ = env.step(action) cur_state = torch.from_numpy(state).to(device=device).reshape(1, state_dim) states = torch.cat([states, cur_state], dim=0) rewards[-1] = reward pred_return = target_return[0, -1] - (reward / scale) target_return = torch.cat([target_return, pred_return.reshape(1, 1)], dim=1) timesteps = torch.cat([timesteps, torch.ones((1, 1), device=device, dtype=torch.long) * (t + 1)], dim=1) episode_return += reward episode_length += 1 if done: break
transformers/examples/research_projects/decision_transformer/run_decision_transformer.py/0
{ "file_path": "transformers/examples/research_projects/decision_transformer/run_decision_transformer.py", "repo_id": "transformers", "token_count": 2763 }
282
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is the exact same script as `examples/question-answering/run_squad.py` (as of 2020, January 8th) with an additional and optional step of distillation.""" import argparse import glob import logging import os import random import timeit import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForQuestionAnswering, BertTokenizer, DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer, RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer, XLMConfig, XLMForQuestionAnswering, XLMTokenizer, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForQuestionAnswering, BertTokenizer), "xlnet": (XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizer), "xlm": (XLMConfig, XLMForQuestionAnswering, XLMTokenizer), "distilbert": (DistilBertConfig, DistilBertForQuestionAnswering, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForQuestionAnswering, RobertaTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer, teacher=None): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 
args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): try: # set global_step to global_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproducibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() if teacher is not None: teacher.eval() batch = tuple(t.to(args.device) for t in batch) inputs = { 
"input_ids": batch[0], "attention_mask": batch[1], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type != "distilbert": inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) outputs = model(**inputs) loss, start_logits_stu, end_logits_stu = outputs # Distillation loss if teacher is not None: if "token_type_ids" not in inputs: inputs["token_type_ids"] = None if args.teacher_type == "xlm" else batch[2] with torch.no_grad(): start_logits_tea, end_logits_tea = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) assert start_logits_tea.size() == start_logits_stu.size() assert end_logits_tea.size() == end_logits_stu.size() loss_fct = nn.KLDivLoss(reduction="batchmean") loss_start = loss_fct( nn.functional.log_softmax(start_logits_stu / args.temperature, dim=-1), nn.functional.softmax(start_logits_tea / args.temperature, dim=-1), ) * (args.temperature**2) loss_end = loss_fct( nn.functional.log_softmax(end_logits_stu / args.temperature, dim=-1), nn.functional.softmax(end_logits_tea / args.temperature, dim=-1), ) * (args.temperature**2) loss_ce = (loss_start + loss_end) / 2.0 loss = args.alpha_ce * loss_ce + args.alpha_squad * loss if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and 
global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): model = nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1]} if args.model_type != "distilbert": inputs["token_type_ids"] = None if args.model_type == "xlm" else batch[2] # XLM don't use segment_ids example_indices = batch[3] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None if args.model_type in ["xlnet", "xlm"]: # XLNet uses a more complex post-processing procedure predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, model.config.start_n_top, model.config.end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. 
results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_file = args.predict_file if evaluate else args.train_file cached_features_file = os.path.join( os.path.dirname(input_file), "cached_distillation_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) try: features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) except KeyError: raise DeprecationWarning( "You seem to be loading features from an older version of this script please delete the " "file %s in order for it to be created again" % cached_features_file ) else: logger.info("Creating features from dataset file at %s", input_file) processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help=( "Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for" " distillation." ), ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_squad", default=0.5, type=float, help="True SQuAD loss linear weight. Only for distillation." 
) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help=( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ), ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." 
) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--verbose_logging", action="store_true", help=( "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation." ), ) parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. " "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_ce > 0.0 assert args.alpha_ce + args.alpha_squad > 0.0 assert args.teacher_type != "distilbert", "We constraint teachers not to be of type DistilBERT." teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained( args.teacher_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None ) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. 
Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
transformers/examples/research_projects/distillation/run_squad_w_distillation.py/0
{ "file_path": "transformers/examples/research_projects/distillation/run_squad_w_distillation.py", "repo_id": "transformers", "token_count": 15430 }
283
from .model import FSNERModel from .tokenizer_utils import FSNERTokenizerUtils __all__ = ["FSNERModel", "FSNERTokenizerUtils"]
transformers/examples/research_projects/fsner/src/fsner/__init__.py/0
{ "file_path": "transformers/examples/research_projects/fsner/src/fsner/__init__.py", "repo_id": "transformers", "token_count": 44 }
284
command: - python3 - train.py method: random parameters: lr: values: [4e-5, 3e-5] warmup_steps: values: [20000, 15000, 10000, 5000] weight_decay: distribution: normal mu: 1e-2 sigma: 2e-3 metric: name: eval_loss goal: minimize
transformers/examples/research_projects/jax-projects/big_bird/sweep_flax.yaml/0
{ "file_path": "transformers/examples/research_projects/jax-projects/big_bird/sweep_flax.yaml", "repo_id": "transformers", "token_count": 222 }
285
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning LayoutLMv3 for token classification on FUNSD or CORD. """ # You can also adapt this script on your own token classification task and datasets. Pointers for this are left as # comments. import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np from datasets import ClassLabel, load_dataset, load_metric import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoProcessor, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.data.data_collator import default_data_collator from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.19.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="microsoft/layoutlmv3-base", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) processor_name: Optional[str] = field( default=None, metadata={"help": "Name or path to the processor files if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default="nielsr/funsd-layoutlmv3", metadata={"help": "The name of the dataset to use (via the datasets library)."}, ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) text_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} ) label_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=512, metadata={ "help": ( "The maximum total input sequence length after tokenization. If set, sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) label_all_tokens: bool = field( default=False, metadata={ "help": ( "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." ) }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." self.task_name = self.task_name.lower() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name == "funsd": # Downloading and loading a dataset from the hub. dataset = load_dataset( "nielsr/funsd-layoutlmv3", data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=True if model_args.use_auth_token else None, ) elif data_args.dataset_name == "cord": # Downloading and loading a dataset from the hub. dataset = load_dataset( "nielsr/cord-layoutlmv3", data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=True if model_args.use_auth_token else None, ) else: raise ValueError("This script only supports either FUNSD or CORD out-of-the-box.") if training_args.do_train: column_names = dataset["train"].column_names features = dataset["train"].features else: column_names = dataset["test"].column_names features = dataset["test"].features image_column_name = "image" text_column_name = "words" if "words" in column_names else "tokens" boxes_column_name = "bboxes" label_column_name = ( f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1] ) remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. 
def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. # Otherwise, we have to get the list of labels manually. if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. id2label = dict(enumerate(label_list)) label2id = {v: k for k, v in enumerate(label_list)} else: label_list = get_label_list(datasets["train"][label_column_name]) id2label = dict(enumerate(label_list)) label2id = {v: k for k, v in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) processor = AutoProcessor.from_pretrained( model_args.processor_name if model_args.processor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, add_prefix_space=True, apply_ocr=False, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # Set the correspondences label/ID inside the model config model.config.label2id = label2id model.config.id2label = id2label # Preprocessing the dataset # The processor does everything for us (prepare the image using LayoutLMv3ImageProcessor # and prepare the words, boxes and word-level labels using LayoutLMv3TokenizerFast) def prepare_examples(examples): images = examples[image_column_name] words = examples[text_column_name] boxes = examples[boxes_column_name] word_labels = examples[label_column_name] encoding = processor( images, words, boxes=boxes, word_labels=word_labels, truncation=True, padding="max_length", max_length=data_args.max_seq_length, ) return encoding if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") train_dataset = dataset["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( prepare_examples, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: validation_name = "test" if validation_name not in dataset: raise ValueError("--do_eval requires a validation dataset") eval_dataset = dataset[validation_name] if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( prepare_examples, batched=True, 
remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = datasets["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_dataset.map( prepare_examples, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor, data_collator=default_data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict") predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) # Save predictions output_predictions_file = 
os.path.join(training_args.output_dir, "predictions.txt") if trainer.is_world_process_zero(): with open(output_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/research_projects/layoutlmv3/run_funsd_cord.py/0
{ "file_path": "transformers/examples/research_projects/layoutlmv3/run_funsd_cord.py", "repo_id": "transformers", "token_count": 8703 }
286
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> ## Whole Word Mask Language Model These scripts leverage the 🤗 Datasets library and the Trainer API. You can easily customize them to your needs if you need extra processing on your datasets. The following examples will run on datasets hosted on our [hub](https://huggingface.co/datasets) or with your own text files for training and validation. We give examples of both below. The BERT authors released a new version of BERT using Whole Word Masking in May 2019. Instead of masking randomly selected tokens (which may be part of words), they mask randomly selected words (masking all the tokens corresponding to that word). This technique has been refined for Chinese in [this paper](https://arxiv.org/abs/1906.08101). To fine-tune a model using whole word masking, use the following script: ```bash python run_mlm_wwm.py \ --model_name_or_path FacebookAI/roberta-base \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --do_train \ --do_eval \ --output_dir /tmp/test-mlm-wwm ``` For Chinese models, we need to generate a reference file (which requires the ltp library), because Chinese text is tokenized at the character level. **Q:** Why a reference file? **A:** Suppose we have a Chinese sentence like `我喜欢你`. The original Chinese-BERT will tokenize it as `['我','喜','欢','你']` (character level). But `喜欢` is a whole word. For whole word masking, we need a result like `['我','喜','##欢','你']`, so we need a reference file to tell the model which positions of the original BERT tokens should have `##` added. **Q:** Why LTP? **A:** Because the best-known Chinese WWM BERT is [Chinese-BERT-wwm](https://github.com/ymcui/Chinese-BERT-wwm) by HIT. It works well on many Chinese tasks such as CLUE (the Chinese GLUE). They use LTP, so if we want to fine-tune their model, we need LTP. You can run the following: ```bash export TRAIN_FILE=/path/to/train/file export LTP_RESOURCE=/path/to/ltp/tokenizer export BERT_RESOURCE=/path/to/bert/tokenizer export SAVE_PATH=/path/to/data/ref.txt python run_chinese_ref.py \ --file_name=$TRAIN_FILE \ --ltp=$LTP_RESOURCE \ --bert=$BERT_RESOURCE \ --save_path=$SAVE_PATH ``` Then you can run the script like this: ```bash export TRAIN_FILE=/path/to/train/file export VALIDATION_FILE=/path/to/validation/file export TRAIN_REF_FILE=/path/to/train/chinese_ref/file export VALIDATION_REF_FILE=/path/to/validation/chinese_ref/file export OUTPUT_DIR=/tmp/test-mlm-wwm python run_mlm_wwm.py \ --model_name_or_path FacebookAI/roberta-base \ --train_file $TRAIN_FILE \ --validation_file $VALIDATION_FILE \ --train_ref_file $TRAIN_REF_FILE \ --validation_ref_file $VALIDATION_REF_FILE \ --do_train \ --do_eval \ --output_dir $OUTPUT_DIR ``` **Note 1:** On TPU, you should use the flag `--pad_to_max_length` to make sure all your batches have the same length. **Note 2:** If you have any questions or something goes wrong when running this code, don't hesitate to ping @wlhgtc.
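To make the reference-file idea concrete, here is a minimal sketch (not the actual implementation in `run_mlm_wwm.py`; the on-disk format produced by `run_chinese_ref.py` is assumed rather than verified) of how the listed positions are typically applied before whole word masking:

```python
# Positions listed in the reference mark characters that continue the previous
# word, so they receive a "##" prefix. Whole word masking can then treat
# "喜" and "##欢" as a single maskable unit.
tokens = ["我", "喜", "欢", "你"]   # character-level Chinese-BERT tokens
chinese_ref = [2]                   # index 2 ("欢") continues the word "喜欢"

wwm_tokens = [f"##{tok}" if i in chinese_ref else tok for i, tok in enumerate(tokens)]
print(wwm_tokens)  # ['我', '喜', '##欢', '你']
```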
transformers/examples/research_projects/mlm_wwm/README.md/0
{ "file_path": "transformers/examples/research_projects/mlm_wwm/README.md", "repo_id": "transformers", "token_count": 1192 }
287
# coding=utf-8 # Copyright 2020-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Masked Linear module: A fully connected layer that computes an adaptive binary mask on the fly. The mask (binary or not) is computed at each forward pass and multiplied against the weight matrix to prune a portion of the weights. The pruned weight matrix is then multiplied against the inputs (and if necessary, the bias is added). """ import math import torch from torch import nn from torch.nn import init from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer class MaskedLinear(nn.Linear): """ Fully Connected layer with on the fly adaptive mask. If needed, a score matrix is created to store the importance of each associated weight. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, mask_init: str = "constant", mask_scale: float = 0.0, pruning_method: str = "topK", ): """ Args: in_features (`int`) Size of each input sample out_features (`int`) Size of each output sample bias (`bool`) If set to ``False``, the layer will not learn an additive bias. Default: ``True`` mask_init (`str`) The initialization method for the score matrix if a score matrix is needed. Choices: ["constant", "uniform", "kaiming"] Default: ``constant`` mask_scale (`float`) The initialization parameter for the chosen initialization method `mask_init`. Default: ``0.`` pruning_method (`str`) Method to compute the mask. 
Choices: ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"] Default: ``topK`` """ super(MaskedLinear, self).__init__(in_features=in_features, out_features=out_features, bias=bias) assert pruning_method in ["topK", "threshold", "sigmoied_threshold", "magnitude", "l0"] self.pruning_method = pruning_method if self.pruning_method in ["topK", "threshold", "sigmoied_threshold", "l0"]: self.mask_scale = mask_scale self.mask_init = mask_init self.mask_scores = nn.Parameter(torch.empty(self.weight.size())) self.init_mask() def init_mask(self): if self.mask_init == "constant": init.constant_(self.mask_scores, val=self.mask_scale) elif self.mask_init == "uniform": init.uniform_(self.mask_scores, a=-self.mask_scale, b=self.mask_scale) elif self.mask_init == "kaiming": init.kaiming_uniform_(self.mask_scores, a=math.sqrt(5)) def forward(self, input: torch.tensor, threshold: float): # Get the mask if self.pruning_method == "topK": mask = TopKBinarizer.apply(self.mask_scores, threshold) elif self.pruning_method in ["threshold", "sigmoied_threshold"]: sig = "sigmoied" in self.pruning_method mask = ThresholdBinarizer.apply(self.mask_scores, threshold, sig) elif self.pruning_method == "magnitude": mask = MagnitudeBinarizer.apply(self.weight, threshold) elif self.pruning_method == "l0": l, r, b = -0.1, 1.1, 2 / 3 if self.training: u = torch.zeros_like(self.mask_scores).uniform_().clamp(0.0001, 0.9999) s = torch.sigmoid((u.log() - (1 - u).log() + self.mask_scores) / b) else: s = torch.sigmoid(self.mask_scores) s_bar = s * (r - l) + l mask = s_bar.clamp(min=0.0, max=1.0) # Mask weights with computed mask weight_thresholded = mask * self.weight # Compute output (linear layer) with masked weights return nn.functional.linear(input, weight_thresholded, self.bias)
transformers/examples/research_projects/movement-pruning/emmental/modules/masked_nn.py/0
{ "file_path": "transformers/examples/research_projects/movement-pruning/emmental/modules/masked_nn.py", "repo_id": "transformers", "token_count": 1917 }
288
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex logger = logging.getLogger(__name__) class RayRetriever: def __init__(self): self.initialized = False def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index): if not self.initialized: self.retriever = RagRetriever( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, ) self.initialized = True def init_retrieval(self): self.retriever.index.init_index() def clear_object(self): # delete the old self.retriever object before assigning the new index del self.retriever self.initialized = False def retrieve(self, question_hidden_states, n_docs): doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs) doc_dicts = self.retriever.index.get_doc_dicts(doc_ids) return doc_ids, retrieved_doc_embeds, doc_dicts class RagRayDistributedRetriever(RagRetriever): """ A distributed retriever built on top of the ``Ray`` API, a library for building distributed applications (https://docs.ray.io/en/master/). package. During training, all training workers initialize their own instance of a `RagRayDistributedRetriever`, and each instance of this distributed retriever shares a common set of Retrieval Ray Actors (https://docs.ray.io/en/master/walkthrough.html#remote -classes-actors) that load the index on separate processes. Ray handles the communication between the `RagRayDistributedRetriever` instances and the remote Ray actors. If training is done in a non-distributed setup, the index will simply be loaded in the same process as the training worker and Ray will not be used. Args: config (:class:`~transformers.RagConfig`): The configuration of the RAG model this Retriever is used with. Contains parameters indicating which ``Index`` to build. question_encoder_tokenizer (:class:`~transformers.PreTrainedTokenizer`): The tokenizer that was used to tokenize the question. It is used to decode the question and then use the generator_tokenizer. generator_tokenizer (:class:`~transformers.PreTrainedTokenizer`): The tokenizer used for the generator part of the RagModel. retrieval_workers (:obj:`List[ray.ActorClass(RayRetriever)]`): A list of already initialized `RayRetriever` actors. These actor classes run on remote processes and are responsible for performing the index lookup. index (:class:`~transformers.retrieval_rag.Index`, optional, defaults to the one defined by the configuration): If specified, use this index instead of the one built using the configuration """ def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None): if index is not None and index.is_initialized() and len(retrieval_workers) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you'll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. 
More info in examples/rag/use_own_knowledge_dataset.py " ) super().__init__( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, ) self.retrieval_workers = retrieval_workers self.question_encoder_tokenizer = question_encoder_tokenizer self.generator_tokenizer = generator_tokenizer if len(self.retrieval_workers) > 0: ray.get( [ worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index) for worker in self.retrieval_workers ] ) def init_retrieval(self): """ Retriever initialization function, needs to be called from the training process. This function triggers retrieval initialization for all retrieval actors if using distributed setting, or loads index into current process if training is not distributed. """ logger.info("initializing retrieval") if len(self.retrieval_workers) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers]) else: # Non-distributed training. Load index into this same process. self.index.init_index() def retrieve(self, question_hidden_states, n_docs): """ Retrieves documents for specified ``question_hidden_states``. If running training with multiple workers, a random retrieval actor is selected to perform the index lookup and return the result. Args: question_hidden_states (:obj:`np.ndarray` of shape :obj:`(batch_size, vector_size)`): A batch of query vectors to retrieve with. n_docs (:obj:`int`): The number of docs retrieved per query. Output: retrieved_doc_embeds (:obj:`np.ndarray` of shape :obj:`(batch_size, n_docs, dim)` The retrieval embeddings of the retrieved docs per query. doc_ids (:obj:`np.ndarray` of shape :obj:`batch_size, n_docs`) The ids of the documents in the index doc_dicts (:obj:`List[dict]`): The retrieved_doc_embeds examples per query. """ if len(self.retrieval_workers) > 0: # Select a random retrieval actor. 
random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)] doc_ids, retrieved_doc_embeds, doc_dicts = ray.get( random_worker.retrieve.remote(question_hidden_states, n_docs) ) else: doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs) doc_dicts = self.index.get_doc_dicts(doc_ids) return retrieved_doc_embeds, doc_ids, doc_dicts @classmethod def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs): return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs) @classmethod def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs): config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs) rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config) question_encoder_tokenizer = rag_tokenizer.question_encoder generator_tokenizer = rag_tokenizer.generator if indexed_dataset is not None: config.index_name = "custom" index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset) else: index = cls._build_index(config) return cls( config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, retrieval_workers=actor_handles, index=index, ) def re_load(self): logger.info("re-loading the new dataset with embeddings") # access from the training loop ray.get([worker.clear_object.remote() for worker in self.retrieval_workers]) # build the index object again index = self._build_index(self.config) ray.get( [ worker.create_rag_retriever.remote( self.config, self.question_encoder_tokenizer, self.generator_tokenizer, index ) for worker in self.retrieval_workers ] )
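A minimal sketch of how these pieces are typically wired together (the actor count and model id are illustrative; the fine-tuning scripts in this project do the equivalent with more configuration):

```python
import ray

# Assumes RayRetriever and RagRayDistributedRetriever from this module are in scope.
ray.init()

# Spawn the remote actors that will each hold a copy of the index.
RemoteRetriever = ray.remote(RayRetriever)
workers = [RemoteRetriever.remote() for _ in range(2)]

retriever = RagRayDistributedRetriever.from_pretrained(
    "facebook/rag-sequence-nq",   # illustrative model id
    actor_handles=workers,
    indexed_dataset=None,         # fall back to the index described by the config
)
retriever.init_retrieval()        # triggers index loading on every actor
```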
transformers/examples/research_projects/rag-end2end-retriever/distributed_ray_retriever.py/0
{ "file_path": "transformers/examples/research_projects/rag-end2end-retriever/distributed_ray_retriever.py", "repo_id": "transformers", "token_count": 3358 }
289
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser logger = logging.getLogger(__name__) torch.set_grad_enabled(False) device = "cuda" if torch.cuda.is_available() else "cpu" def split_text(text: str, n=100, character=" ") -> List[str]: """Split the text every ``n``-th occurrence of ``character``""" text = text.split(character) return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] def split_documents(documents: dict) -> dict: """Split documents into passages""" titles, texts = [], [] for title, text in zip(documents["title"], documents["text"]): if text is not None: for passage in split_text(text): titles.append(title if title is not None else "") texts.append(passage) return {"title": titles, "text": texts} def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict: """Compute the DPR embeddings of document passages""" input_ids = ctx_tokenizer( documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"] embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def main( rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments", ): ###################################### logger.info("Step 1 - Create the dataset") ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way dataset = load_dataset( "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets?highlight=csv#csv-files # Then split the documents into passages of 100 words dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc) # And compute the embeddings ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device) ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) new_features = Features( {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} ) # optional, save as float32 instead of float64 to save space dataset = dataset.map( partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, ) # And finally save your dataset passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset") dataset.save_to_disk(passages_path) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - Index 
the dataset") ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index("embeddings", custom_index=index) # And save the index index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss") dataset.get_index("embeddings").save(index_path) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class RagExampleArguments: csv_path: str = field( default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, ) question: Optional[str] = field( default=None, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, ) rag_model_name: str = field( default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, ) dpr_ctx_encoder_model_name: str = field( default="facebook/dpr-ctx_encoder-multiset-base", metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) }, ) output_dir: Optional[str] = field( default=str(Path(__file__).parent / "test_run" / "dummy-kb"), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, ) @dataclass class ProcessingArguments: num_proc: Optional[int] = field( default=None, metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." }, ) batch_size: int = field( default=16, metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." }, ) @dataclass class IndexHnswArguments: d: int = field( default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, ) m: int = field( default=128, metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) }, ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
transformers/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py/0
{ "file_path": "transformers/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py", "repo_id": "transformers", "token_count": 2578 }
290
import argparse import logging import os import sys import tempfile from pathlib import Path import lightning_base import pytest import pytorch_lightning as pl import torch from convert_pl_checkpoint_to_hf import convert_pl_to_hf from distillation import distill_main from finetune import SummarizationModule, main from huggingface_hub import list_models from parameterized import parameterized from run_eval import generate_summaries_or_translations from torch import nn from transformers import AutoConfig, AutoModelForSeq2SeqLM from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow from utils import label_smoothed_nll_loss, lmap, load_json logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() CUDA_AVAILABLE = torch.cuda.is_available() CHEAP_ARGS = { "max_tokens_per_batch": None, "supervise_forward": True, "normalize_hidden": True, "label_smoothing": 0.2, "eval_max_gen_length": None, "eval_beams": 1, "val_metric": "loss", "save_top_k": 1, "adafactor": True, "early_stopping_patience": 2, "logger_name": "default", "length_penalty": 0.5, "cache_dir": "", "task": "summarization", "num_workers": 2, "alpha_hid": 0, "freeze_embeds": True, "enc_only": False, "tgt_suffix": "", "resume_from_checkpoint": None, "sortish_sampler": True, "student_decoder_layers": 1, "val_check_interval": 1.0, "output_dir": "", "fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp "no_teacher": False, "fp16_opt_level": "O1", "gpus": 1 if CUDA_AVAILABLE else 0, "n_tpu_cores": 0, "max_grad_norm": 1.0, "do_train": True, "do_predict": True, "accumulate_grad_batches": 1, "server_ip": "", "server_port": "", "seed": 42, "model_name_or_path": "sshleifer/bart-tiny-random", "config_name": "", "tokenizer_name": "facebook/bart-large", "do_lower_case": False, "learning_rate": 0.3, "lr_scheduler": "linear", "weight_decay": 0.0, "adam_epsilon": 1e-08, "warmup_steps": 0, "max_epochs": 1, "train_batch_size": 2, "eval_batch_size": 2, "max_source_length": 12, "max_target_length": 12, "val_max_target_length": 12, "test_max_target_length": 12, "fast_dev_run": False, "no_cache": False, "n_train": -1, "n_val": -1, "n_test": -1, "student_encoder_layers": 1, "freeze_encoder": False, "auto_scale_batch_size": False, "overwrite_output_dir": False, "student": None, } def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" T5_TINIER = "sshleifer/t5-tinier-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" FSMT_TINY = "stas/tiny-wmt19-en-de" stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestSummarizationDistiller(TestCasePlus): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks return cls @slow @require_torch_gpu def test_hub_configs(self): """I put require_torch_gpu cause I only want this to 
run with self-scheduled.""" model_list = list_models() org = "sshleifer" model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)] allowed_to_be_broken = ["sshleifer/blenderbot-3B", "sshleifer/blenderbot-90M"] failures = [] for m in model_ids: if m in allowed_to_be_broken: continue try: AutoConfig.from_pretrained(m) except Exception: failures.append(m) assert not failures, f"The following models could not be loaded through AutoConfig: {failures}" def test_distill_no_teacher(self): updates = {"student_encoder_layers": 2, "student_decoder_layers": 1, "no_teacher": True} self._test_distiller_cli(updates) def test_distill_checkpointing_with_teacher(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "max_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", } model = self._test_distiller_cli(updates, check_contents=False) ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) self.assertEqual(len(transformer_ckpts), 2) examples = lmap(str.strip, Path(model.hparams.data_dir).joinpath("test.source").open().readlines()) out_path = tempfile.mktemp() # XXX: not being cleaned up generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr")) self.assertTrue(Path(out_path).exists()) out_path_new = self.get_auto_remove_tmp_dir() convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new) assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin")) def test_loss_fn(self): model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY) input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"] target_ids = torch.tensor([[0, 4, 8, 2], [0, 8, 2, 1]], dtype=torch.long, device=model.device) decoder_input_ids = target_ids[:, :-1].contiguous() # Why this line? lm_labels = target_ids[:, 1:].clone() # why clone? 
model_computed_loss = model( input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, labels=lm_labels, use_cache=False ).loss logits = model(input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, use_cache=False).logits lprobs = nn.functional.log_softmax(logits, dim=-1) smoothed_loss, nll_loss = label_smoothed_nll_loss( lprobs, lm_labels, 0.1, ignore_index=model.config.pad_token_id ) with self.assertRaises(AssertionError): # TODO: understand why this breaks self.assertEqual(nll_loss, model_computed_loss) def test_distill_mbart(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "num_train_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "task": "translation", "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", "tokenizer_name": MBART_TINY, "teacher": MBART_TINY, "src_lang": "en_XX", "tgt_lang": "ro_RO", } model = self._test_distiller_cli(updates, check_contents=False) assert model.model.config.model_type == "mbart" ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) all_files = list(Path(model.output_dir).glob("best_tfmr/*")) assert len(all_files) > 2 self.assertEqual(len(transformer_ckpts), 2) def test_distill_t5(self): updates = { "student_encoder_layers": 1, "student_decoder_layers": 1, "alpha_hid": 2.0, "teacher": T5_TINY, "model_name_or_path": T5_TINY, "tokenizer_name": T5_TINY, } self._test_distiller_cli(updates) def test_distill_different_base_models(self): updates = { "teacher": T5_TINY, "student": T5_TINIER, "model_name_or_path": T5_TINIER, "tokenizer_name": T5_TINIER, } self._test_distiller_cli(updates) def _test_distiller_cli(self, updates, check_contents=True): default_updates = { "label_smoothing": 0.0, "early_stopping_patience": -1, "train_batch_size": 1, "eval_batch_size": 2, "max_epochs": 2, "alpha_mlm": 0.2, "alpha_ce": 0.8, "do_predict": True, "model_name_or_path": "sshleifer/tinier_bart", "teacher": CHEAP_ARGS["model_name_or_path"], "val_check_interval": 0.5, } default_updates.update(updates) args_d: dict = CHEAP_ARGS.copy() tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) model = distill_main(argparse.Namespace(**args_d)) if not check_contents: return model contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} ckpt_files = [p for p in contents if p.endswith("ckpt")] assert len(ckpt_files) > 0 self.assertIn("test_generations.txt", contents) self.assertIn("test_results.txt", contents) metrics = load_json(model.metrics_save_path) last_step_stats = metrics["val"][-1] self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"]) self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float) desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) + 1) self.assertEqual(len(metrics["val"]), desired_n_evals) self.assertEqual(len(metrics["test"]), 1) return model class TestTheRest(TestCasePlus): @parameterized.expand( [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY], ) def test_finetune(self, model): args_d: dict = CHEAP_ARGS.copy() task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization" args_d["label_smoothing"] = 0.1 if task == "translation" else 0 tmp_dir = 
make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, output_dir=output_dir, do_predict=True, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) assert "n_train" in args_d args = argparse.Namespace(**args_d) module = main(args) input_embeds = module.model.get_input_embeddings() assert not input_embeds.weight.requires_grad if model == T5_TINY: lm_head = module.model.lm_head assert not lm_head.weight.requires_grad assert (lm_head.weight == input_embeds.weight).all().item() elif model == FSMT_TINY: fsmt = module.model.model embed_pos = fsmt.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not fsmt.decoder.embed_tokens.weight.requires_grad # check that embeds are not the same assert fsmt.decoder.embed_tokens != fsmt.encoder.embed_tokens else: bart = module.model.model embed_pos = bart.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not bart.shared.weight.requires_grad # check that embeds are the same assert bart.decoder.embed_tokens == bart.encoder.embed_tokens assert bart.decoder.embed_tokens == bart.shared example_batch = load_json(module.output_dir / "text_batch.json") assert isinstance(example_batch, dict) assert len(example_batch) >= 4 def test_finetune_extra_model_args(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) args_d.update( data_dir=tmp_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # test models whose config includes the extra_model_args model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d1 = args_d.copy() args_d1.update( model_name_or_path=model, output_dir=output_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: args_d1[p] = 0.5 args = argparse.Namespace(**args_d1) model = main(args) for p in extra_model_params: assert getattr(model.config, p) == 0.5, f"failed to override the model config for param {p}" # test models whose config doesn't include the extra_model_args model = T5_TINY output_dir = self.get_auto_remove_tmp_dir() args_d2 = args_d.copy() args_d2.update( model_name_or_path=model, output_dir=output_dir, ) unsupported_param = "encoder_layerdrop" args_d2[unsupported_param] = 0.5 args = argparse.Namespace(**args_d2) with pytest.raises(Exception) as excinfo: model = main(args) assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute" def test_finetune_lr_schedulers(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, output_dir=output_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # emulate finetune.py parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = {"--help": True} # --help test with pytest.raises(SystemExit) as excinfo: with CaptureStdout() as cs: args 
= parser.parse_args(args) assert False, "--help is expected to sys.exit" assert excinfo.type == SystemExit expected = lightning_base.arg_to_scheduler_metavar assert expected in cs.out, "--help is expected to list the supported schedulers" # --lr_scheduler=non_existing_scheduler test unsupported_param = "non_existing_scheduler" args = {f"--lr_scheduler={unsupported_param}"} with pytest.raises(SystemExit) as excinfo: with CaptureStderr() as cs: args = parser.parse_args(args) assert False, "invalid argument is expected to sys.exit" assert excinfo.type == SystemExit expected = f"invalid choice: '{unsupported_param}'" assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}" # --lr_scheduler=existing_scheduler test supported_param = "cosine" args_d1 = args_d.copy() args_d1["lr_scheduler"] = supported_param args = argparse.Namespace(**args_d1) model = main(args) assert ( getattr(model.hparams, "lr_scheduler") == supported_param ), f"lr_scheduler={supported_param} shouldn't fail"
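For context on what `test_loss_fn` above exercises, here is a sketch of the standard label-smoothed negative log-likelihood. The project's own `utils.label_smoothed_nll_loss` may differ in details such as reduction and `ignore_index` handling:

```python
import torch

def label_smoothed_nll_loss_sketch(lprobs: torch.Tensor, target: torch.Tensor, epsilon: float):
    """lprobs: (..., vocab) log-probabilities, target: (...) gold token ids."""
    nll_loss = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth_loss = -lprobs.sum(dim=-1)            # cross-entropy against a uniform distribution
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss.sum(), nll_loss.sum()
```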
transformers/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py", "repo_id": "transformers", "token_count": 7908 }
291
#!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-base-100h" \ --num_train_epochs="30" \ --per_device_train_batch_size="32" \ --per_device_eval_batch_size="32" \ --evaluation_strategy="steps" \ --save_total_limit="3" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="facebook/wav2vec2-base" \ --fp16 \ --dataset_name="librispeech_asr" \ --dataset_config_name="clean" \ --train_split_name="train.100" \ --preprocessing_num_workers="32" \ --group_by_length \ --freeze_feature_extractor
transformers/examples/research_projects/wav2vec2/finetune_base_100.sh/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/finetune_base_100.sh", "repo_id": "transformers", "token_count": 249 }
292
# Zero-shot classifier distillation Author: @joeddav This script provides a way to improve the speed and memory performance of a zero-shot classifier by training a more efficient student model from the zero-shot teacher's predictions over an unlabeled dataset. The zero-shot classification pipeline uses a model pre-trained on natural language inference (NLI) to determine the compatibility of a set of candidate class names with a given sequence. This serves as a convenient out-of-the-box classifier without the need for labeled training data. However, for a given sequence, the method requires each possible label to be fed through the large NLI model separately. Thus for `N` sequences and `K` classes, a total of `N*K` forward passes through the model are required. This requirement slows inference considerably, particularly as `K` grows. Given (1) an unlabeled corpus and (2) a set of candidate class names, the provided script trains a student model with a standard classification head with `K` output dimensions. The resulting student model can then be used for classifying novel text instances with a significant boost in speed and memory performance while retaining similar classification performance to the original zero-shot model. ### Usage A teacher NLI model can be distilled to a more efficient student model by running [`distill_classifier.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/zero-shot-distillation/distill_classifier.py): ```bash python distill_classifier.py \ --data_file <unlabeled_data.txt> \ --class_names_file <class_names.txt> \ --output_dir <output_dir> ``` `<unlabeled_data.txt>` should be a text file with a single unlabeled example per line. `<class_names.txt>` is a text file with one class name per line. Other optional arguments include: - `--teacher_name_or_path` (default: `roberta-large-mnli`): The name or path of the NLI teacher model. - `--student_name_or_path` (default: `distilbert-base-uncased`): The name or path of the student model which will be fine-tuned to copy the teacher predictions. - `--hypothesis_template` (default `"This example is {}."`): The template used to turn each label into an NLI-style hypothesis when generating teacher predictions. This template must include a `{}` or similar syntax for the candidate label to be inserted into the template. For example, the default template is `"This example is {}."` With the candidate label `sports`, this would be fed into the model like `[CLS] sequence to classify [SEP] This example is sports . [SEP]`. - `--multi_class`: Whether or not multiple candidate labels can be true. By default, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If `--multi_class` is passed, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score. This is sometimes called "multi-class multi-label" classification. - `--temperature` (default: `1.0`): The temperature applied to the softmax of the teacher model predictions. A higher temperature results in a student with smoother (lower confidence) predictions than the teacher, while a value `<1` results in a higher-confidence, peaked distribution. The default `1.0` is equivalent to no smoothing. A sketch of how such temperature-scaled targets are typically used is shown after this list. - `--teacher_batch_size` (default: `32`): The batch size used for generating a single set of teacher predictions. Does not affect training. Use `--per_device_train_batch_size` to change the training batch size.
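As referenced in the `--temperature` option above, the sketch below shows how temperature-scaled teacher predictions are typically turned into soft training targets for the student. It illustrates the general technique and is not a verbatim excerpt from `distill_classifier.py`:

```python
import torch
import torch.nn.functional as F

temperature = 2.0
teacher_logits = torch.randn(8, 4)   # (batch, num_classes) scores from the zero-shot teacher
student_logits = torch.randn(8, 4)   # raw outputs of the student's classification head

# Soften the teacher distribution, then train the student to match it
# with a cross-entropy against the soft targets.
soft_targets = F.softmax(teacher_logits / temperature, dim=-1)
loss = -(soft_targets * F.log_softmax(student_logits, dim=-1)).sum(dim=-1).mean()
```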
Any of the arguments in the 🤗 Trainer's [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html?#trainingarguments) can also be modified, such as `--learning_rate`, `--fp16`, `--no_cuda`, `--warmup_steps`, etc. Run `python distill_classifier.py -h` for a full list of available arguments or consult the [Trainer documentation](https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments). > **Note**: Distributed and TPU training are not currently supported. Single-node multi-GPU is supported, however, and will run automatically if multiple GPUs are available. ### Example: Topic classification > A full colab demo notebook of this example can be found [here](https://colab.research.google.com/drive/1mjBjd0cR8G57ZpsnFCS3ngGyo5nCa9ya?usp=sharing). Let's say we're interested in classifying news articles into one of four topic categories: "the world", "sports", "business", or "science/tech". We have an unlabeled dataset, [AG's News](https://huggingface.co/datasets/ag_news), which corresponds to this problem (in reality AG's News is annotated, but we will pretend it is not for the sake of example). We can use an NLI model like `roberta-large-mnli` for zero-shot classification like so: ```python >>> class_names = ["the world", "sports", "business", "science/tech"] >>> hypothesis_template = "This text is about {}." >>> sequence = "A new moon has been discovered in Jupiter's orbit" >>> zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli") >>> zero_shot_classifier(sequence, class_names, hypothesis_template=hypothesis_template) {'sequence': "A new moon has been discovered in Jupiter's orbit", 'labels': ['science/tech', 'the world', 'business', 'sports'], 'scores': [0.7035840153694153, 0.18744826316833496, 0.06027870625257492, 0.04868902638554573]} ``` Unfortunately, inference is slow since each of our 4 class names must be fed through the large model for every sequence to be classified. But with our unlabeled data we can distill the model to a small distilbert classifier to make future inference much faster. To run the script, we will need to put each training example (text only) from AG's News on its own line in `agnews/unlabeled.txt`, and each of the four class names in the newline-separated `agnews/class_names.txt`. Then we can run distillation with the following command: ```bash python distill_classifier.py \ --data_file ./agnews/unlabeled.txt \ --class_names_file ./agnews/class_names.txt \ --teacher_name_or_path roberta-large-mnli \ --hypothesis_template "This text is about {}." \ --output_dir ./agnews/distilled ``` The script will generate a set of soft zero-shot predictions from `roberta-large-mnli` for each example in `agnews/unlabeled.txt`. It will then train a student distilbert classifier on the teacher predictions and save the resulting model in `./agnews/distilled`.
The resulting model can then be loaded and used like any other pre-trained classifier: ```python from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("./agnews/distilled") tokenizer = AutoTokenizer.from_pretrained("./agnews/distilled") ``` and even used trivially with a `TextClassificationPipeline`: ```python >>> distilled_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True) >>> distilled_classifier(sequence) [[{'label': 'the world', 'score': 0.14899294078350067}, {'label': 'sports', 'score': 0.03205857425928116}, {'label': 'business', 'score': 0.05943061783909798}, {'label': 'science/tech', 'score': 0.7595179080963135}]] ``` > Tip: pass `device=0` when constructing a pipeline to run on a GPU. As we can see, the results of the student closely resemble those of the teacher despite never having seen this example during training. Now let's do a quick & dirty speed comparison simulating 16K examples with a batch size of 16: ```python for _ in range(1000): zero_shot_classifier([sequence] * 16, class_names) # runs in 1m 23s on a single V100 GPU ``` ```python %%time for _ in range(1000): distilled_classifier([sequence] * 16) # runs in 10.3s on a single V100 GPU ``` As we can see, the distilled student model runs an order of magnitude faster than its teacher NLI model. This is also a setting where we only have `K=4` possible labels. The higher the number of classes for a given task, the more drastic the speedup will be, since the zero-shot teacher's complexity scales linearly with the number of classes. Since we secretly have access to ground truth labels for AG's news, we can evaluate the accuracy of each model. The original zero-shot model `roberta-large-mnli` gets an accuracy of 69.3% on the held-out test set. After training a student on the unlabeled training set, the distilled model gets a similar score of 70.4%. Lastly, you can share the distilled model with the community and/or use it with our inference API by [uploading it to the 🤗 Hub](https://huggingface.co/transformers/model_sharing.html). We've uploaded the distilled model from this example at [joeddav/distilbert-base-uncased-agnews-student](https://huggingface.co/joeddav/distilbert-base-uncased-agnews-student).
transformers/examples/research_projects/zero-shot-distillation/README.md/0
{ "file_path": "transformers/examples/research_projects/zero-shot-distillation/README.md", "repo_id": "transformers", "token_count": 2467 }
293
#!/usr/bin/env python # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script for preparing TFRecord shards for pre-tokenized examples.""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets.", ) parser.add_argument( "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path", type=str, default="sayakpaul/unigram-tokenizer-wikitext", help="Tokenizer identifier. Can be a local filepath or a Hub identifier.", ) parser.add_argument( "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.", ) parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"]) parser.add_argument( "--limit", default=None, type=int, help="Limit the number of shards (used for debugging).", ) parser.add_argument( "--max_length", type=int, default=512, help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8.", ) parser.add_argument( "--output_dir", default="tf-tpu", type=str, help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket.", ) args = parser.parse_args() return args def tokenize_function(tokenizer): def fn(examples): return tokenizer(examples["text"]) return fn def get_serialized_examples(tokenized_data): records = [] for i in range(len(tokenized_data["input_ids"])): features = { "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])), "attention_mask": tf.train.Feature( int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i]) ), } features = tf.train.Features(feature=features) example = tf.train.Example(features=features) record_bytes = example.SerializeToString() records.append(record_bytes) return records def main(args): dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split) if args.limit is not None: max_samples = min(len(dataset), args.limit) dataset = dataset.select(range(max_samples)) print(f"Limiting the dataset to {args.limit} entries.") tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) split_dir = os.path.join(args.output_dir, args.split) if not os.path.exists(split_dir): os.makedirs(split_dir) else: split_dir = os.path.join(args.output_dir, args.split) # Tokenize the whole dataset at once. tokenize_fn = tokenize_function(tokenizer) dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"]) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 total_length = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. result = { k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)] for k, t in concatenated_examples.items() } return result grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4) shard_count = 0 total_records = 0 for shard in range(0, len(grouped_dataset), args.shard_size): dataset_snapshot = grouped_dataset[shard : shard + args.shard_size] records_containing = len(dataset_snapshot["input_ids"]) filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord") serialized_examples = get_serialized_examples(dataset_snapshot) with tf.io.TFRecordWriter(filename) as out_file: for i in range(len(serialized_examples)): example = serialized_examples[i] out_file.write(example) print("Wrote file {} containing {} records".format(filename, records_containing)) shard_count += 1 total_records += records_containing with open(f"split-{args.split}-records-count.txt", "w") as f: print(f"Total {args.split} records: {total_records}", file=f) if __name__ == "__main__": args = parse_args() main(args)
transformers/examples/tensorflow/language-modeling-tpu/prepare_tfrecord_shards.py/0
{ "file_path": "transformers/examples/tensorflow/language-modeling-tpu/prepare_tfrecord_shards.py", "repo_id": "transformers", "token_count": 2654 }
294
[tool.ruff] # Never enforce `E501` (line length violations). ignore = ["C901", "E501", "E741", "F402", "F823" ] select = ["C", "E", "F", "I", "W"] line-length = 119 # Ignore import violations in all `__init__.py` files. [tool.ruff.per-file-ignores] "__init__.py" = ["E402", "F401", "F403", "F811"] "src/transformers/file_utils.py" = ["F401"] "src/transformers/utils/dummy_*.py" = ["F401"] [tool.ruff.isort] lines-after-imports = 2 known-first-party = ["transformers"] [tool.ruff.format] # Like Black, use double quotes for strings. quote-style = "double" # Like Black, indent with spaces, rather than tabs. indent-style = "space" # Like Black, respect magic trailing commas. skip-magic-trailing-comma = false # Like Black, automatically detect the appropriate line ending. line-ending = "auto" [tool.pytest.ini_options] doctest_optionflags="NUMBER NORMALIZE_WHITESPACE ELLIPSIS" doctest_glob="**/*.md" markers = [ "flash_attn_test: marks tests related to flash attention (deselect with '-m \"not flash_attn_test\"')", "bitsandbytes: select (or deselect with `not`) bitsandbytes integration tests", ]
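As a small illustration of the custom pytest markers registered above (the test bodies are hypothetical placeholders):

```python
import pytest

@pytest.mark.flash_attn_test
def test_flash_attention_smoke():
    # Deselect with: pytest -m "not flash_attn_test"
    assert True

@pytest.mark.bitsandbytes
def test_bitsandbytes_smoke():
    # Select only these with: pytest -m bitsandbytes
    assert True
```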
transformers/pyproject.toml/0
{ "file_path": "transformers/pyproject.toml", "repo_id": "transformers", "token_count": 406 }
295
# This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp # Copyright 2020 The HuggingFace Team and the AllenNLP authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities for working with the local dataset cache. """ import copy import csv import linecache import os import platform import sys import warnings from abc import ABC, abstractmethod from collections import defaultdict, namedtuple from datetime import datetime from multiprocessing import Pipe, Process, Queue from multiprocessing.connection import Connection from typing import Callable, Iterable, List, NamedTuple, Optional, Union from .. import AutoConfig, PretrainedConfig from .. import __version__ as version from ..utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available, logging from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): from torch.cuda import empty_cache as torch_empty_cache if is_tf_available(): from tensorflow.python.eager import context as tf_context if is_psutil_available(): import psutil if is_py3nvml_available(): import py3nvml.py3nvml as nvml if platform.system() == "Windows": from signal import CTRL_C_EVENT as SIGKILL else: from signal import SIGKILL logger = logging.get_logger(__name__) # pylint: disable=invalid-name _is_memory_tracing_enabled = False BenchmarkOutput = namedtuple( "BenchmarkOutput", [ "time_inference_result", "memory_inference_result", "time_train_result", "memory_train_result", "inference_summary", "train_summary", ], ) def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]: """ This function wraps another function into its own separated process. In order to ensure accurate memory measurements it is important that the function is executed in a separate process Args: - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process - `do_multi_processing`: (`bool`) Whether to run function on separate process or not """ def multi_process_func(*args, **kwargs): # run function in an individual # process to get correct memory def wrapper_func(queue: Queue, *args): try: result = func(*args) except Exception as e: logger.error(e) print(e) result = "N/A" queue.put(result) queue = Queue() p = Process(target=wrapper_func, args=[queue] + list(args)) p.start() result = queue.get() p.join() return result if do_multi_processing: logger.info(f"Function {func} is executed in its own process...") return multi_process_func else: return func def is_memory_tracing_enabled(): global _is_memory_tracing_enabled return _is_memory_tracing_enabled class Frame(NamedTuple): """ `Frame` is a NamedTuple used to gather the current frame state. 
`Frame` has the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script """ filename: str module: str line_number: int event: str line_text: str class UsedMemoryState(NamedTuple): """ `UsedMemoryState` are named tuples with the following fields: - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file) - 'cpu_memory': CPU RSS memory state *before* executing the line - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided) """ frame: Frame cpu_memory: int gpu_memory: int class Memory(NamedTuple): """ `Memory` NamedTuple have a single field `bytes` and you can get a human readable str of the number of mega bytes by calling `__repr__` - `byte` (integer): number of bytes, """ bytes: int def __repr__(self) -> str: return str(bytes_to_mega_bytes(self.bytes)) class MemoryState(NamedTuple): """ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields: - `frame` (`Frame`): the current frame (see above) - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple """ frame: Frame cpu: Memory gpu: Memory cpu_gpu: Memory class MemorySummary(NamedTuple): """ `MemorySummary` namedtuple otherwise with the fields: - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by subtracting the memory after executing each line from the memory before executing said line. - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released) - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default). """ sequential: List[MemoryState] cumulative: List[MemoryState] current: List[MemoryState] total: Memory MemoryTrace = List[UsedMemoryState] def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int: """ measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package `memory_profiler`: https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239 Args: - `function`: (`callable`): function() -> ... 
function without any arguments to measure for which to measure the peak memory - `interval`: (`float`, `optional`, defaults to `0.5`) interval in second for which to measure the memory usage - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage Returns: - `max_memory`: (`int`) consumed memory peak in Bytes """ def get_cpu_memory(process_id: int) -> int: """ measures current cpu memory usage of a given `process_id` Args: - `process_id`: (`int`) process_id for which to measure memory Returns - `memory`: (`int`) consumed memory in Bytes """ process = psutil.Process(process_id) try: meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info" memory = getattr(process, meminfo_attr)()[0] except psutil.AccessDenied: raise ValueError("Error with Psutil.") return memory if not is_psutil_available(): logger.warning( "Psutil not installed, we won't log CPU memory usage. " "Install Psutil (pip install psutil) to use CPU memory tracing." ) max_memory = "N/A" else: class MemoryMeasureProcess(Process): """ `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the memory usage of a process """ def __init__(self, process_id: int, child_connection: Connection, interval: float): super().__init__() self.process_id = process_id self.interval = interval self.connection = child_connection self.num_measurements = 1 self.mem_usage = get_cpu_memory(self.process_id) def run(self): self.connection.send(0) stop = False while True: self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id)) self.num_measurements += 1 if stop: break stop = self.connection.poll(self.interval) # send results to parent pipe self.connection.send(self.mem_usage) self.connection.send(self.num_measurements) while True: # create child, parent connection child_connection, parent_connection = Pipe() # instantiate process mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval) mem_process.start() # wait until we get memory parent_connection.recv() try: # execute function function() # start parent connection parent_connection.send(0) # receive memory and num measurements max_memory = parent_connection.recv() num_measurements = parent_connection.recv() except Exception: # kill process in a clean way parent = psutil.Process(os.getpid()) for child in parent.children(recursive=True): os.kill(child.pid, SIGKILL) mem_process.join(0) raise RuntimeError("Process killed. Error in Process") # run process at least 20 * interval or until it finishes mem_process.join(20 * interval) if (num_measurements > 4) or (interval < 1e-6): break # reduce interval interval /= 10 return max_memory def start_memory_tracing( modules_to_trace: Optional[Union[str, Iterable[str]]] = None, modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None, events_to_trace: str = "line", gpus_to_trace: Optional[List[int]] = None, ) -> MemoryTrace: """ Setup line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py` for usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident Set Size” (the non-swapped physical memory the process is using). See https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info Args: - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 
'fairseq' or 'transformers.models.gpt2.modeling_gpt2') - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided if string or list of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch') - `events_to_trace`: string or list of string of events to be recorded (see official python doc for `sys.settrace` for the list of events) default to line - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Default to tracing all GPUs Return: - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script). - `UsedMemoryState` are named tuples with the following fields: - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file, location in current file) - 'cpu_memory': CPU RSS memory state *before* executing the line - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if provided) `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script """ if is_psutil_available(): process = psutil.Process(os.getpid()) else: logger.warning( "Psutil not installed, we won't log CPU memory usage. " "Install psutil (pip install psutil) to use CPU memory tracing." ) process = None if is_py3nvml_available(): try: nvml.nvmlInit() devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace nvml.nvmlShutdown() except (OSError, nvml.NVMLError): logger.warning("Error while initializing communication with GPU. We won't perform GPU memory tracing.") log_gpu = False else: log_gpu = is_torch_available() or is_tf_available() else: logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to use GPU memory tracing." ) log_gpu = False memory_trace = [] def traceit(frame, event, args): """ Tracing method executed before running each line in a module or sub-module Record memory allocated in a list with debugging information """ global _is_memory_tracing_enabled if not _is_memory_tracing_enabled: return traceit # Filter events if events_to_trace is not None: if isinstance(events_to_trace, str) and event != events_to_trace: return traceit elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace: return traceit if "__name__" not in frame.f_globals: return traceit # Filter modules name = frame.f_globals["__name__"] if not isinstance(name, str): return traceit else: # Filter whitelist of modules to trace if modules_to_trace is not None: if isinstance(modules_to_trace, str) and modules_to_trace not in name: return traceit elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace): return traceit # Filter blacklist of modules not to trace if modules_not_to_trace is not None: if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name: return traceit elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace): return traceit # Record current tracing state (file, location in file...) 
lineno = frame.f_lineno filename = frame.f_globals["__file__"] if filename.endswith(".pyc") or filename.endswith(".pyo"): filename = filename[:-1] line = linecache.getline(filename, lineno).rstrip() traced_state = Frame(filename, name, lineno, event, line) # Record current memory state (rss memory) and compute difference with previous memory state cpu_mem = 0 if process is not None: mem = process.memory_info() cpu_mem = mem.rss gpu_mem = 0 if log_gpu: # Clear GPU caches if is_torch_available(): torch_empty_cache() if is_tf_available(): tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802 # Sum used memory for all GPUs nvml.nvmlInit() for i in devices: handle = nvml.nvmlDeviceGetHandleByIndex(i) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) gpu_mem += meminfo.used nvml.nvmlShutdown() mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem) memory_trace.append(mem_state) return traceit sys.settrace(traceit) global _is_memory_tracing_enabled _is_memory_tracing_enabled = True return memory_trace def stop_memory_tracing( memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True ) -> Optional[MemorySummary]: """ Stop memory tracing cleanly and return a summary of the memory trace if a trace is given. Args: `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary `ignore_released_memory` (boolean, default: None): if True we only sum memory increase to compute total memory Return: - None if `memory_trace` is None - `MemorySummary` namedtuple otherwise with the fields: - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by subtracting the memory after executing each line from the memory before executing said line. - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory is released) - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Line with memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default). 
`Memory` named tuple have fields - `byte` (integer): number of bytes, - `string` (string): same as human readable string (ex: "3.5MB") `Frame` are namedtuple used to list the current frame state and have the following fields: - 'filename' (string): Name of the file currently executed - 'module' (string): Name of the module currently executed - 'line_number' (int): Number of the line currently executed - 'event' (string): Event that triggered the tracing (default will be "line") - 'line_text' (string): Text of the line in the python script `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields: - `frame` (`Frame`): the current frame (see above) - `cpu`: CPU memory consumed at during the current frame as a `Memory` named tuple - `gpu`: GPU memory consumed at during the current frame as a `Memory` named tuple - `cpu_gpu`: CPU + GPU memory consumed at during the current frame as a `Memory` named tuple """ global _is_memory_tracing_enabled _is_memory_tracing_enabled = False if memory_trace is not None and len(memory_trace) > 1: memory_diff_trace = [] memory_curr_trace = [] cumulative_memory_dict = defaultdict(lambda: [0, 0, 0]) for ( (frame, cpu_mem, gpu_mem), (next_frame, next_cpu_mem, next_gpu_mem), ) in zip(memory_trace[:-1], memory_trace[1:]): cpu_mem_inc = next_cpu_mem - cpu_mem gpu_mem_inc = next_gpu_mem - gpu_mem cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc memory_diff_trace.append( MemoryState( frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc), ) ) memory_curr_trace.append( MemoryState( frame=frame, cpu=Memory(next_cpu_mem), gpu=Memory(next_gpu_mem), cpu_gpu=Memory(next_gpu_mem + next_cpu_mem), ) ) cumulative_memory_dict[frame][0] += cpu_mem_inc cumulative_memory_dict[frame][1] += gpu_mem_inc cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc cumulative_memory = sorted( cumulative_memory_dict.items(), key=lambda x: x[1][2], reverse=True ) # order by the total CPU + GPU memory increase cumulative_memory = [ MemoryState( frame=frame, cpu=Memory(cpu_mem_inc), gpu=Memory(gpu_mem_inc), cpu_gpu=Memory(cpu_gpu_mem_inc), ) for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory ] memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True) if ignore_released_memory: total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace) else: total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace) total_memory = Memory(total_memory) return MemorySummary( sequential=memory_diff_trace, cumulative=cumulative_memory, current=memory_curr_trace, total=total_memory, ) return None def bytes_to_mega_bytes(memory_amount: int) -> int: """Utility to convert a number of bytes (int) into a number of mega bytes (int)""" return memory_amount >> 20 class Benchmark(ABC): """ Benchmarks is a simple but feature-complete benchmarking script to compare memory and time performance of models in Transformers. """ args: BenchmarkArguments configs: PretrainedConfig framework: str def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None): self.args = args if configs is None: self.config_dict = { model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names } else: self.config_dict = dict(zip(self.args.model_names, configs)) warnings.warn( f"The class {self.__class__} is deprecated. 
Hugging Face Benchmarking utils" " are deprecated in general and it is advised to use external Benchmarking libraries " " to benchmark Transformer models.", FutureWarning, ) if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == 0: logger.warning( "Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The" " flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing." ) self._print_fn = None self._framework_version = None self._environment_info = None @property def print_fn(self): if self._print_fn is None: if self.args.log_print: def print_and_log(*args): with open(self.args.log_filename, "a") as log_file: log_file.write("".join(args) + "\n") print(*args) self._print_fn = print_and_log else: self._print_fn = print return self._print_fn @property @abstractmethod def framework_version(self): pass @abstractmethod def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: pass @abstractmethod def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: pass @abstractmethod def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: pass @abstractmethod def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: pass def inference_speed(self, *args, **kwargs) -> float: return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs) def train_speed(self, *args, **kwargs) -> float: return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs) def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]: return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs) def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]: return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs) def run(self): result_dict = {model_name: {} for model_name in self.args.model_names} inference_result_time = copy.deepcopy(result_dict) inference_result_memory = copy.deepcopy(result_dict) train_result_time = copy.deepcopy(result_dict) train_result_memory = copy.deepcopy(result_dict) for c, model_name in enumerate(self.args.model_names): self.print_fn(f"{c + 1} / {len(self.args.model_names)}") model_dict = { "bs": self.args.batch_sizes, "ss": self.args.sequence_lengths, "result": {i: {} for i in self.args.batch_sizes}, } inference_result_time[model_name] = copy.deepcopy(model_dict) inference_result_memory[model_name] = copy.deepcopy(model_dict) train_result_time[model_name] = copy.deepcopy(model_dict) train_result_memory[model_name] = copy.deepcopy(model_dict) inference_summary = train_summary = None for batch_size in self.args.batch_sizes: for sequence_length in self.args.sequence_lengths: if self.args.inference: if self.args.memory: memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length) inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory if self.args.speed: time = self.inference_speed(model_name, batch_size, sequence_length) inference_result_time[model_name]["result"][batch_size][sequence_length] = time if self.args.training: if self.args.memory: memory, train_summary = self.train_memory(model_name, batch_size, sequence_length) 
train_result_memory[model_name]["result"][batch_size][sequence_length] = memory if self.args.speed: time = self.train_speed(model_name, batch_size, sequence_length) train_result_time[model_name]["result"][batch_size][sequence_length] = time if self.args.inference: if self.args.speed: self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULT").center(40) + 20 * "=") self.print_results(inference_result_time, type_label="Time in s") self.save_to_csv(inference_result_time, self.args.inference_time_csv_file) if self.args.is_tpu: self.print_fn( "TPU was used for inference. Note that the time after compilation stabilized (after ~10" " inferences model.forward(..) calls) was measured." ) if self.args.memory: self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULT").center(40) + 20 * "=") self.print_results(inference_result_memory, type_label="Memory in MB") self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file) if self.args.trace_memory_line_by_line: self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMOMRY - LINE BY LINE - SUMMARY").center(40) + 20 * "=") self.print_memory_trace_statistics(inference_summary) if self.args.training: if self.args.speed: self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=") self.print_results(train_result_time, "Time in s") self.save_to_csv(train_result_time, self.args.train_time_csv_file) if self.args.is_tpu: self.print_fn( "TPU was used for training. Note that the time after compilation stabilized (after ~10 train" " loss=model.forward(...) + loss.backward() calls) was measured." ) if self.args.memory: self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=") self.print_results(train_result_memory, type_label="Memory in MB") self.save_to_csv(train_result_memory, self.args.train_memory_csv_file) if self.args.trace_memory_line_by_line: self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMOMRY - LINE BY LINE - SUMMARY").center(40) + 20 * "=") self.print_memory_trace_statistics(train_summary) if self.args.env_print: self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=") self.print_fn("\n".join([f"- {prop}: {val}" for prop, val in self.environment_info.items()]) + "\n") if self.args.save_to_csv: with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file: writer = csv.writer(csv_file) for key, value in self.environment_info.items(): writer.writerow([key, value]) return BenchmarkOutput( inference_result_time, inference_result_memory, train_result_time, train_result_memory, inference_summary, train_summary, ) @property def environment_info(self): if self._environment_info is None: info = {} info["transformers_version"] = version info["framework"] = self.framework if self.framework == "PyTorch": info["use_torchscript"] = self.args.torchscript if self.framework == "TensorFlow": info["eager_mode"] = self.args.eager_mode info["use_xla"] = self.args.use_xla info["framework_version"] = self.framework_version info["python_version"] = platform.python_version() info["system"] = platform.system() info["cpu"] = platform.processor() info["architecture"] = platform.architecture()[0] info["date"] = datetime.date(datetime.now()) info["time"] = datetime.time(datetime.now()) info["fp16"] = self.args.fp16 info["use_multiprocessing"] = self.args.do_multi_processing info["only_pretrain_model"] = self.args.only_pretrain_model if is_psutil_available(): info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total) else: logger.warning( "Psutil not installed, we won't 
log available CPU memory. " "Install psutil (pip install psutil) to log available CPU memory." ) info["cpu_ram_mb"] = "N/A" info["use_gpu"] = self.args.is_gpu if self.args.is_gpu: info["num_gpus"] = 1 # TODO(PVP) Currently only single GPU is supported if is_py3nvml_available(): nvml.nvmlInit() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) info["gpu"] = nvml.nvmlDeviceGetName(handle) info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total) info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000 info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle) nvml.nvmlShutdown() else: logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." ) info["gpu"] = "N/A" info["gpu_ram_mb"] = "N/A" info["gpu_power_watts"] = "N/A" info["gpu_performance_state"] = "N/A" info["use_tpu"] = self.args.is_tpu # TODO(PVP): See if we can add more information about TPU # see: https://github.com/pytorch/xla/issues/2180 self._environment_info = info return self._environment_info def print_results(self, result_dict, type_label): self.print_fn(80 * "-") self.print_fn( "Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15) ) self.print_fn(80 * "-") for model_name in self.args.model_names: for batch_size in result_dict[model_name]["bs"]: for sequence_length in result_dict[model_name]["ss"]: result = result_dict[model_name]["result"][batch_size][sequence_length] if isinstance(result, float): result = round(1000 * result) / 1000 result = "< 0.001" if result == 0.0 else str(result) else: result = str(result) self.print_fn( model_name[:30].center(30) + str(batch_size).center(15), str(sequence_length).center(15), result.center(15), ) self.print_fn(80 * "-") def print_memory_trace_statistics(self, summary: MemorySummary): self.print_fn( "\nLine by line memory consumption:\n" + "\n".join( f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}" for state in summary.sequential ) ) self.print_fn( "\nLines with top memory consumption:\n" + "\n".join( f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}" for state in summary.cumulative[:6] ) ) self.print_fn( "\nLines with lowest memory consumption:\n" + "\n".join( f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}" for state in summary.cumulative[-6:] ) ) self.print_fn(f"\nTotal memory increase: {summary.total}") def save_to_csv(self, result_dict, filename): if not self.args.save_to_csv: return self.print_fn("Saving results to csv.") with open(filename, mode="w") as csv_file: if len(self.args.model_names) <= 0: raise ValueError(f"At least 1 model should be defined, but got {self.model_names}") fieldnames = ["model", "batch_size", "sequence_length"] writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"]) writer.writeheader() for model_name in self.args.model_names: result_dict_model = result_dict[model_name]["result"] for bs in result_dict_model: for ss in result_dict_model[bs]: result_model = result_dict_model[bs][ss] writer.writerow( { "model": model_name, "batch_size": bs, "sequence_length": ss, "result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format( result_model ), } )
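# Illustrative usage sketch (not part of the original module): it only exercises the helpers
# defined above -- `measure_peak_memory_cpu` for the peak RSS of an arbitrary workload and
# `start_memory_tracing`/`stop_memory_tracing` for line-by-line tracing. It assumes `psutil`
# is installed; the throw-away workload is an arbitrary example.
def _example_memory_measurement():
    def workload():
        # Allocate roughly 10 MB of small bytes objects just to have something measurable.
        _ = [bytes(1024) for _ in range(10_000)]

    peak_bytes = measure_peak_memory_cpu(workload, interval=0.1)
    print(f"Peak CPU memory: {bytes_to_mega_bytes(peak_bytes)} MB")

    # Line-by-line RSS tracing restricted to this module; `summary.total` is a `Memory`
    # namedtuple whose repr is expressed in megabytes.
    trace = start_memory_tracing(modules_to_trace=__name__)
    _ = [bytes(1024) for _ in range(10_000)]
    summary = stop_memory_tracing(trace)
    if summary is not None:
        print(f"Total traced memory increase: {summary.total}")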
transformers/src/transformers/benchmark/benchmark_utils.py/0
{ "file_path": "transformers/src/transformers/benchmark/benchmark_utils.py", "repo_id": "transformers", "token_count": 16506 }
296
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from argparse import ArgumentParser from os import listdir, makedirs from pathlib import Path from typing import Dict, List, Optional, Tuple from packaging.version import Version, parse from transformers.pipelines import Pipeline, pipeline from transformers.tokenization_utils import BatchEncoding from transformers.utils import ModelOutput, is_tf_available, is_torch_available # This is the minimal required version to # support some ONNX Runtime features ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") SUPPORTED_PIPELINES = [ "feature-extraction", "ner", "sentiment-analysis", "fill-mask", "question-answering", "text-generation", "translation_en_to_fr", "translation_en_to_de", "translation_en_to_ro", ] class OnnxConverterArgumentParser(ArgumentParser): """ Wraps all the script arguments supported to export transformers models to ONNX IR """ def __init__(self): super().__init__("ONNX Converter") self.add_argument( "--pipeline", type=str, choices=SUPPORTED_PIPELINES, default="feature-extraction", ) self.add_argument( "--model", type=str, required=True, help="Model's id or path (ex: google-bert/bert-base-cased)", ) self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: google-bert/bert-base-cased)") self.add_argument( "--framework", type=str, choices=["pt", "tf"], help="Framework for loading the model", ) self.add_argument("--opset", type=int, default=11, help="ONNX opset to use") self.add_argument( "--check-loading", action="store_true", help="Check ONNX is able to load the model", ) self.add_argument( "--use-external-format", action="store_true", help="Allow exporting model >= than 2Gb", ) self.add_argument( "--quantize", action="store_true", help="Quantize the neural network to be run with int8", ) self.add_argument("output") def generate_identified_filename(filename: Path, identifier: str) -> Path: """ Append a string-identifier at the end (before the extension, if any) to the provided filepath Args: filename: pathlib.Path The actual path object we would like to add an identifier suffix identifier: The suffix to add Returns: String with concatenated identifier at the end of the filename """ return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix) def check_onnxruntime_requirements(minimum_version: Version): """ Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found """ try: import onnxruntime # Parse the version of the installed onnxruntime ort_version = parse(onnxruntime.__version__) # We require 1.4.0 minimum if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: raise ImportError( f"We found an older version of onnxruntime ({onnxruntime.__version__}) " f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" "Please update onnxruntime by running `pip install --upgrade onnxruntime`" ) except 
ImportError: raise ImportError( "onnxruntime doesn't seem to be currently installed. " "Please install the onnxruntime by running `pip install onnxruntime`" " and relaunch the conversion." ) def ensure_valid_input(model, tokens, input_names): """ Ensure inputs are presented in the correct order, without any Non Args: model: The model used to forward the input data tokens: BatchEncoding holding the input data input_names: The name of the inputs Returns: Tuple """ print("Ensuring inputs are in correct order") model_args_name = model.forward.__code__.co_varnames model_args, ordered_input_names = [], [] for arg_name in model_args_name[1:]: # start at index 1 to skip "self" argument if arg_name in input_names: ordered_input_names.append(arg_name) model_args.append(tokens[arg_name]) else: print(f"{arg_name} is not present in the generated input list.") break print(f"Generated inputs order: {ordered_input_names}") return ordered_input_names, tuple(model_args) def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]: """ Attempt to infer the static vs dynamic axes for each input and output tensors for a specific model Args: nlp: The pipeline object holding the model to be exported framework: The framework identifier to dispatch to the correct inference scheme (pt/tf) Returns: - List of the inferred input variable names - List of the inferred output variable names - Dictionary with input/output variables names as key and shape tensor as value - a BatchEncoding reference which was used to infer all the above information """ def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int): if isinstance(tensor, (tuple, list)): return [build_shape_dict(name, t, is_input, seq_len) for t in tensor] else: # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...) 
axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: "batch"} if is_input: if len(tensor.shape) == 2: axes[1] = "sequence" else: raise ValueError(f"Unable to infer tensor axes ({len(tensor.shape)})") else: seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len] axes.update({dim: "sequence" for dim in seq_axes}) print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}") return axes tokens = nlp.tokenizer("This is a sample output", return_tensors=framework) seq_len = tokens.input_ids.shape[-1] outputs = nlp.model(**tokens) if framework == "pt" else nlp.model(tokens) if isinstance(outputs, ModelOutput): outputs = outputs.to_tuple() if not isinstance(outputs, (list, tuple)): outputs = (outputs,) # Generate input names & axes input_vars = list(tokens.keys()) input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()} # flatten potentially grouped outputs (past for gpt2, attentions) outputs_flat = [] for output in outputs: if isinstance(output, (tuple, list)): outputs_flat.extend(output) else: outputs_flat.append(output) # Generate output names & axes output_names = [f"output_{i}" for i in range(len(outputs_flat))] output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)} # Create the aggregated axes representation dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes) return input_vars, output_names, dynamic_axes, tokens def load_graph_from_args( pipeline_name: str, framework: str, model: str, tokenizer: Optional[str] = None, **models_kwargs ) -> Pipeline: """ Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model Args: pipeline_name: The kind of pipeline to use (ner, question-answering, etc.) framework: The actual model to convert the pipeline from ("pt" or "tf") model: The model name which will be loaded by the pipeline tokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value Returns: Pipeline object """ # If no tokenizer provided if tokenizer is None: tokenizer = model # Check the wanted framework is available if framework == "pt" and not is_torch_available(): raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.") if framework == "tf" and not is_tf_available(): raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.") print(f"Loading pipeline (model: {model}, tokenizer: {tokenizer})") # Allocate tokenizer and model return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs) def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool): """ Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR Args: nlp: The pipeline to be exported opset: The actual version of the ONNX operator set to use output: Path where will be stored the generated ONNX model use_external_format: Split the model definition from its parameters to allow model bigger than 2GB Returns: """ if not is_torch_available(): raise Exception("Cannot convert because PyTorch is not installed. 
Please install torch first.") import torch from torch.onnx import export print(f"Using framework PyTorch: {torch.__version__}") with torch.no_grad(): input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt") ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names) export( nlp.model, model_args, f=output.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, ) def convert_tensorflow(nlp: Pipeline, opset: int, output: Path): """ Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR) Args: nlp: The pipeline to be exported opset: The actual version of the ONNX operator set to use output: Path where will be stored the generated ONNX model Notes: TensorFlow cannot export model bigger than 2GB due to internal constraint from TensorFlow """ if not is_tf_available(): raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.") print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\") try: import tensorflow as tf import tf2onnx from tf2onnx import __version__ as t2ov print(f"Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}") # Build input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf") # Forward nlp.model.predict(tokens.data) input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in tokens.items()] model_proto, _ = tf2onnx.convert.from_keras( nlp.model, input_signature, opset=opset, output_path=output.as_posix() ) except ImportError as e: raise Exception( f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first. {e}" ) def convert( framework: str, model: str, output: Path, opset: int, tokenizer: Optional[str] = None, use_external_format: bool = False, pipeline_name: str = "feature-extraction", **model_kwargs, ): """ Convert the pipeline object to the ONNX Intermediate Representation (IR) format Args: framework: The framework the pipeline is backed by ("pt" or "tf") model: The name of the model to load for the pipeline output: The path where the ONNX graph will be stored opset: The actual version of the ONNX operator set to use tokenizer: The name of the model to load for the pipeline, default to the model's name if not provided use_external_format: Split the model definition from its parameters to allow model bigger than 2GB (PyTorch only) pipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.) 
model_kwargs: Keyword arguments to be forwarded to the model constructor Returns: """ warnings.warn( "The `transformers.convert_graph_to_onnx` package is deprecated and will be removed in version 5 of" " Transformers", FutureWarning, ) print(f"ONNX opset version set to: {opset}") # Load the pipeline nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs) if not output.parent.exists(): print(f"Creating folder {output.parent}") makedirs(output.parent.as_posix()) elif len(listdir(output.parent.as_posix())) > 0: raise Exception(f"Folder {output.parent.as_posix()} is not empty, aborting conversion") # Export the graph if framework == "pt": convert_pytorch(nlp, opset, output, use_external_format) else: convert_tensorflow(nlp, opset, output) def optimize(onnx_model_path: Path) -> Path: """ Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the optimizations possible Args: onnx_model_path: filepath where the model binary description is stored Returns: Path where the optimized model binary description has been saved """ from onnxruntime import InferenceSession, SessionOptions # Generate model name with suffix "optimized" opt_model_path = generate_identified_filename(onnx_model_path, "-optimized") sess_option = SessionOptions() sess_option.optimized_model_filepath = opt_model_path.as_posix() _ = InferenceSession(onnx_model_path.as_posix(), sess_option) print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}") print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\") return opt_model_path def quantize(onnx_model_path: Path) -> Path: """ Quantize the weights of the model from float32 to in8 to allow very efficient inference on modern CPU Args: onnx_model_path: Path to location the exported ONNX model is stored Returns: The Path generated for the quantized """ import onnx import onnxruntime from onnx.onnx_pb import ModelProto from onnxruntime.quantization import QuantizationMode from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer from onnxruntime.quantization.registry import IntegerOpsRegistry # Load the ONNX model onnx_model = onnx.load(onnx_model_path.as_posix()) if parse(onnx.__version__) < parse("1.5.0"): print( "Models larger than 2GB will fail to quantize due to protobuf constraint.\n" "Please upgrade to onnxruntime >= 1.5.0." ) # Copy it copy_model = ModelProto() copy_model.CopyFrom(onnx_model) # Construct quantizer # onnxruntime renamed input_qType to activation_qType in v1.13.1, so we # check the onnxruntime version to ensure backward compatibility. 
# See also: https://github.com/microsoft/onnxruntime/pull/12873 if parse(onnxruntime.__version__) < parse("1.13.1"): quantizer = ONNXQuantizer( model=copy_model, per_channel=False, reduce_range=False, mode=QuantizationMode.IntegerOps, static=False, weight_qType=True, input_qType=False, tensors_range=None, nodes_to_quantize=None, nodes_to_exclude=None, op_types_to_quantize=list(IntegerOpsRegistry), ) else: quantizer = ONNXQuantizer( model=copy_model, per_channel=False, reduce_range=False, mode=QuantizationMode.IntegerOps, static=False, weight_qType=True, activation_qType=False, tensors_range=None, nodes_to_quantize=None, nodes_to_exclude=None, op_types_to_quantize=list(IntegerOpsRegistry), ) # Quantize and export quantizer.quantize_model() # Append "-quantized" at the end of the model's name quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized") # Save model print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}") onnx.save_model(quantizer.model.model, quantized_model_path.as_posix()) return quantized_model_path def verify(path: Path): from onnxruntime import InferenceSession, SessionOptions from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException print(f"Checking ONNX model loading from: {path} ...") try: onnx_options = SessionOptions() _ = InferenceSession(path.as_posix(), onnx_options, providers=["CPUExecutionProvider"]) print(f"Model {path} correctly loaded: \N{heavy check mark}") except RuntimeException as re: print(f"Error while loading the model {re}: \N{heavy ballot x}") if __name__ == "__main__": parser = OnnxConverterArgumentParser() args = parser.parse_args() # Make sure output is absolute path args.output = Path(args.output).absolute() try: print("\n====== Converting model to ONNX ======") # Convert convert( args.framework, args.model, args.output, args.opset, args.tokenizer, args.use_external_format, args.pipeline, ) if args.quantize: # Ensure requirements for quantization on onnxruntime is met check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION) # onnxruntime optimizations doesn't provide the same level of performances on TensorFlow than PyTorch if args.framework == "tf": print( "\t Using TensorFlow might not provide the same optimization level compared to PyTorch.\n" "\t For TensorFlow users you can try optimizing the model directly through onnxruntime_tools.\n" "\t For more information, please refer to the onnxruntime documentation:\n" "\t\thttps://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers\n" ) print("\n====== Optimizing ONNX model ======") # Quantization works best when using the optimized version of the model args.optimized_output = optimize(args.output) # Do the quantization on the right graph args.quantized_output = quantize(args.optimized_output) # And verify if args.check_loading: print("\n====== Check exported ONNX model(s) ======") verify(args.output) if hasattr(args, "optimized_output"): verify(args.optimized_output) if hasattr(args, "quantized_output"): verify(args.quantized_output) except Exception as e: print(f"Error while converting the model: {e}") exit(1)
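# Illustrative usage sketch (not part of the original file): drives the deprecated `convert`
# helper above programmatically instead of through the CLI, then mirrors the optional
# optimize/quantize/verify flow from the `__main__` block. It assumes torch, onnx and
# onnxruntime are installed; the checkpoint id and output path are arbitrary examples.
def _example_convert_to_onnx():
    output = Path("onnx/bert-base-cased.onnx").absolute()

    convert(
        framework="pt",                        # export from a PyTorch-backed pipeline
        model="google-bert/bert-base-cased",   # example checkpoint id
        output=output,
        opset=11,
        pipeline_name="feature-extraction",
    )

    # Optional post-processing with onnxruntime, as in the CLI entry point above.
    optimized_path = optimize(output)
    quantized_path = quantize(optimized_path)
    verify(quantized_path)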
transformers/src/transformers/convert_graph_to_onnx.py/0
{ "file_path": "transformers/src/transformers/convert_graph_to_onnx.py", "repo_id": "transformers", "token_count": 7910 }
297
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import dataclasses import json from dataclasses import dataclass from typing import List, Optional, Union from ...utils import is_tf_available, is_torch_available, logging logger = logging.get_logger(__name__) @dataclass class InputExample: """ A single training/test example for simple sequence classification. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ guid: str text_a: str text_b: Optional[str] = None label: Optional[str] = None def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(dataclasses.asdict(self), indent=2) + "\n" @dataclass(frozen=True) class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. Args: input_ids: Indices of input sequence tokens in the vocabulary. attention_mask: Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded) tokens. token_type_ids: (Optional) Segment token indices to indicate first and second portions of the inputs. Only some models use them. label: (Optional) Label corresponding to the input. Int for classification problems, float for regression problems. """ input_ids: List[int] attention_mask: Optional[List[int]] = None token_type_ids: Optional[List[int]] = None label: Optional[Union[int, float]] = None def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(dataclasses.asdict(self)) + "\n" class DataProcessor: """Base class for data converters for sequence classification data sets.""" def get_example_from_tensor_dict(self, tensor_dict): """ Gets an example from a dict with tensorflow tensors. Args: tensor_dict: Keys and values should match the corresponding Glue tensorflow_dataset examples. """ raise NotImplementedError() def get_train_examples(self, data_dir): """Gets a collection of [`InputExample`] for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of [`InputExample`] for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of [`InputExample`] for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() def tfds_map(self, example): """ Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. 
This method converts examples to the correct format. """ if len(self.get_labels()) > 1: example.label = self.get_labels()[int(example.label)] return example @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8-sig") as f: return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) class SingleSentenceClassificationProcessor(DataProcessor): """Generic processor for a single sentence classification data set.""" def __init__(self, labels=None, examples=None, mode="classification", verbose=False): self.labels = [] if labels is None else labels self.examples = [] if examples is None else examples self.mode = mode self.verbose = verbose def __len__(self): return len(self.examples) def __getitem__(self, idx): if isinstance(idx, slice): return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx]) return self.examples[idx] @classmethod def create_from_csv( cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs ): processor = cls(**kwargs) processor.add_examples_from_csv( file_name, split_name=split_name, column_label=column_label, column_text=column_text, column_id=column_id, skip_first_row=skip_first_row, overwrite_labels=True, overwrite_examples=True, ) return processor @classmethod def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs): processor = cls(**kwargs) processor.add_examples(texts_or_text_and_labels, labels=labels) return processor def add_examples_from_csv( self, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, overwrite_labels=False, overwrite_examples=False, ): lines = self._read_tsv(file_name) if skip_first_row: lines = lines[1:] texts = [] labels = [] ids = [] for i, line in enumerate(lines): texts.append(line[column_text]) labels.append(line[column_label]) if column_id is not None: ids.append(line[column_id]) else: guid = f"{split_name}-{i}" if split_name else str(i) ids.append(guid) return self.add_examples( texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples ) def add_examples( self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False ): if labels is not None and len(texts_or_text_and_labels) != len(labels): raise ValueError( f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}" ) if ids is not None and len(texts_or_text_and_labels) != len(ids): raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}") if ids is None: ids = [None] * len(texts_or_text_and_labels) if labels is None: labels = [None] * len(texts_or_text_and_labels) examples = [] added_labels = set() for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids): if isinstance(text_or_text_and_label, (tuple, list)) and label is None: text, label = text_or_text_and_label else: text = text_or_text_and_label added_labels.add(label) examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label)) # Update examples if overwrite_examples: self.examples = examples else: self.examples.extend(examples) # Update labels if overwrite_labels: self.labels = list(added_labels) else: self.labels = list(set(self.labels).union(added_labels)) return self.examples def get_features( self, tokenizer, max_length=None, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, 
return_tensors=None, ): """ Convert examples in a list of `InputFeatures` Args: tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default) pad_token: Padding token mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual values) Returns: If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which can be fed to the model. """ if max_length is None: max_length = tokenizer.max_len label_map = {label: i for i, label in enumerate(self.labels)} all_input_ids = [] for ex_index, example in enumerate(self.examples): if ex_index % 10000 == 0: logger.info(f"Tokenizing example {ex_index}") input_ids = tokenizer.encode( example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len), ) all_input_ids.append(input_ids) batch_length = max(len(input_ids) for input_ids in all_input_ids) features = [] for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)): if ex_index % 10000 == 0: logger.info(f"Writing example {ex_index}/{len(self.examples)}") # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) # Zero-pad up to the sequence length. padding_length = batch_length - len(input_ids) if pad_on_left: input_ids = ([pad_token] * padding_length) + input_ids attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask else: input_ids = input_ids + ([pad_token] * padding_length) attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) if len(input_ids) != batch_length: raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}") if len(attention_mask) != batch_length: raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}") if self.mode == "classification": label = label_map[example.label] elif self.mode == "regression": label = float(example.label) else: raise ValueError(self.mode) if ex_index < 5 and self.verbose: logger.info("*** Example ***") logger.info(f"guid: {example.guid}") logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}") logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}") logger.info(f"label: {example.label} (id = {label})") features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label)) if return_tensors is None: return features elif return_tensors == "tf": if not is_tf_available(): raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported") import tensorflow as tf def gen(): for ex in features: yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label) dataset = tf.data.Dataset.from_generator( gen, ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])), ) return dataset elif return_tensors == "pt": if not is_torch_available(): raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported") import torch from torch.utils.data import TensorDataset 
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) if self.mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif self.mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels) return dataset else: raise ValueError("return_tensors should be one of 'tf' or 'pt'")
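# Illustrative usage sketch (not part of the original file): builds a
# `SingleSentenceClassificationProcessor` from in-memory (text, label) pairs using the API
# defined above. The sentences and labels are arbitrary examples.
def _example_single_sentence_processor():
    processor = SingleSentenceClassificationProcessor.create_from_examples(
        [("I loved this movie", "pos"), ("Terrible pacing and a weak script", "neg")]
    )
    print(len(processor), sorted(processor.labels))  # -> 2 ['neg', 'pos']

    first = processor[0]  # an `InputExample`
    print(first.to_json_string())

    # `processor.get_features(tokenizer, ...)` can then turn these examples into
    # `InputFeatures`; note that it relies on the legacy `tokenizer.max_len` attribute.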
transformers/src/transformers/data/processors/utils.py/0
{ "file_path": "transformers/src/transformers/data/processors/utils.py", "repo_id": "transformers", "token_count": 5994 }
298
# coding=utf-8 # Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team. # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import warnings from functools import partial from typing import Any, Dict, Optional, Union import flax import jax import jax.numpy as jnp import numpy as np from jax import lax from ..models.auto import ( FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, ) from ..utils import ModelOutput, logging from .configuration_utils import GenerationConfig from .flax_logits_process import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxForceTokensLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxSuppressTokensAtBeginLogitsProcessor, FlaxSuppressTokensLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) logger = logging.get_logger(__name__) @flax.struct.dataclass class FlaxGreedySearchOutput(ModelOutput): """ Flax Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences. """ sequences: jnp.ndarray = None @flax.struct.dataclass class FlaxSampleOutput(ModelOutput): """ Flax Base class for outputs of decoder-only generation models using sampling. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences. """ sequences: jnp.ndarray = None @flax.struct.dataclass class FlaxBeamSearchOutput(ModelOutput): """ Flax Base class for outputs of decoder-only generation models using greedy search. Args: sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): The generated sequences. scores (`jnp.ndarray` of shape `(batch_size,)`): The scores (log probabilities) of the generated sequences. """ sequences: jnp.ndarray = None scores: jnp.ndarray = None @flax.struct.dataclass class GreedyState: cur_len: jnp.ndarray sequences: jnp.ndarray running_token: jnp.ndarray is_sent_finished: jnp.ndarray model_kwargs: Dict[str, jnp.ndarray] @flax.struct.dataclass class SampleState: cur_len: jnp.ndarray sequences: jnp.ndarray running_token: jnp.ndarray is_sent_finished: jnp.ndarray prng_key: jnp.ndarray model_kwargs: Dict[str, jnp.ndarray] @flax.struct.dataclass class BeamSearchState: cur_len: jnp.ndarray running_sequences: jnp.ndarray running_scores: jnp.ndarray sequences: jnp.ndarray scores: jnp.ndarray is_sent_finished: jnp.ndarray model_kwargs: Dict[str, jnp.ndarray] class FlaxGenerationMixin: """ A class containing all functions for auto-regressive text generation, to be used as a mixin in [`FlaxPreTrainedModel`]. 
The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for: - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and `do_sample=False` - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and `do_sample=True` - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and `do_sample=False` You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). """ def prepare_inputs_for_generation(self, *args, **kwargs): raise NotImplementedError( "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." ) @staticmethod def _run_loop_in_debug(cond_fn, body_fn, init_state): """ Run generation in untraced mode. This should only be used for debugging purposes. """ state = init_state while cond_fn(state): state = body_fn(state) return state def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs): encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not (argument.startswith("decoder_") or argument.startswith("cross_attn")) } model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, batch_size: int, decoder_start_token_id: int = None, bos_token_id: int = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ) -> jnp.ndarray: if model_kwargs is not None and "decoder_input_ids" in model_kwargs: # Only use this arg if not None, otherwise just remove from model_kwargs decoder_input_ids = model_kwargs.pop("decoder_input_ids") if decoder_input_ids is not None: return decoder_input_ids decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0) def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: # retrieve decoder_start_token_id for encoder-decoder models # fall back to bos_token_id if necessary decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.generation_config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "decoder_start_token_id") and self.config.decoder.decoder_start_token_id is not None ): return self.config.decoder.decoder_start_token_id elif bos_token_id is not None: return bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): return self.config.decoder.bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." ) @staticmethod def _expand_to_num_beams(tensor, num_beams): return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:]) def _adapt_logits_for_beam_search(self, logits): """ This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam search behavior. 
Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`]. """ return logits def _validate_model_class(self): """ Confirms that the model class is compatible with generation. If not, raises an exception that points to the right class to use. """ if not self.can_generate(): generate_compatible_mappings = [ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, ] generate_compatible_classes = set() for model_mapping in generate_compatible_mappings: supported_models = model_mapping.get(type(self.config), default=None) if supported_models is not None: generate_compatible_classes.add(supported_models.__name__) exception_message = ( f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " "it doesn't have a language model head." ) if generate_compatible_classes: exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" raise TypeError(exception_message) def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" unused_model_args = [] model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) if "kwargs" in model_args or "model_kwargs" in model_args: model_args |= set(inspect.signature(self.__call__).parameters) for key, value in model_kwargs.items(): if value is not None and key not in model_args: unused_model_args.append(key) if unused_model_args: raise ValueError( f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" " generate arguments will also show up in this list)" ) def generate( self, input_ids: jnp.ndarray, generation_config: Optional[GenerationConfig] = None, prng_key: Optional[jnp.ndarray] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, **kwargs, ): r""" Generates sequences of token ids for models with a language modeling head. Parameters: input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. `**kwargs` passed to generate matching the attributes of `generation_config` will override them. If `generation_config` is not provided, the default will be used, which has the following loading priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s default values, whose documentation should be checked to parameterize generation. trace (`bool`, *optional*, defaults to `True`): Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a considerably slower runtime. params (`Dict[str, jnp.ndarray]`, *optional*): Optionally the model parameters can be passed. Can be useful for parallelized generation. logits_processor (`FlaxLogitsProcessorList`, *optional*): Custom logits processors that complement the default logits processors built from arguments and generation config. 
If a logit processor is passed that is already created with the arguments or a generation config, an error is thrown. This feature is intended for advanced users. kwargs (`Dict[str, Any]`, *optional*): Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. Return: [`~utils.ModelOutput`]. """ # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call self._validate_model_class() # priority: `generation_config` argument > `model.generation_config` (the default generation config) if generation_config is None: # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior, # two conditions must be met # 1) the generation config must have been created from the model config (`_from_model_config` field); # 2) the generation config must have seen no modification since its creation (the hash is the same). if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash( self.generation_config ): new_generation_config = GenerationConfig.from_model_config(self.config) if new_generation_config != self.generation_config: warnings.warn( "You have modified the pretrained model configuration to control generation. This is a" " deprecated strategy to control generation and will be removed soon, in a future version." " Please use and modify the model generation configuration (see" " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )" ) self.generation_config = new_generation_config generation_config = self.generation_config generation_config = copy.deepcopy(generation_config) model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs self._validate_model_kwargs(model_kwargs.copy()) logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList() # set init values prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: if model_kwargs.get("attention_mask") is None: logger.warning( "The attention mask and the pad token id were not set. As a consequence, you may observe " "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." ) eos_token_id = generation_config.eos_token_id if isinstance(eos_token_id, list): eos_token_id = eos_token_id[0] logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") generation_config.pad_token_id = eos_token_id if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder: raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.") # decoder-only models should use left-padding for generation (can't be checked with `trace=True`) if not self.config.is_encoder_decoder and not trace: if ( generation_config.pad_token_id is not None and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0 ): logger.warning( "A decoder-only architecture is being used, but right-padding was detected! For correct " "generation results, please set `padding_side='left'` when initializing the tokenizer." 
) batch_size = input_ids.shape[0] if self.config.is_encoder_decoder: # add encoder_outputs to model_kwargs if model_kwargs.get("encoder_outputs") is None: model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs) # prepare decoder_input_ids for generation input_ids = self._prepare_decoder_input_ids_for_generation( batch_size, decoder_start_token_id=generation_config.decoder_start_token_id, bos_token_id=generation_config.bos_token_id, model_kwargs=model_kwargs, ) # Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = input_ids.shape[-1] has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20: # 20 is the default max_length of the generation config warnings.warn( f"Using the model-agnostic default `max_length` (={generation_config.max_length}) " "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.", UserWarning, ) elif generation_config.max_new_tokens is not None: if not has_default_max_length and generation_config.max_length is not None: logger.warning( f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (=" f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " "Please refer to the documentation for more information. " "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" ) generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length: raise ValueError( f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than" f" the maximum length ({generation_config.max_length})" ) if input_ids_seq_length >= generation_config.max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" " increasing `max_new_tokens`." 
) logits_processor = self._get_logits_processor( generation_config=generation_config, input_ids_seq_length=input_ids_seq_length, logits_processor=logits_processor, ) if not generation_config.do_sample and generation_config.num_beams == 1: return self._greedy_search( input_ids, generation_config.max_length, generation_config.pad_token_id, generation_config.eos_token_id, logits_processor=logits_processor, trace=trace, params=params, model_kwargs=model_kwargs, ) elif generation_config.do_sample and generation_config.num_beams == 1: logits_warper = self._get_logits_warper(generation_config=generation_config) return self._sample( input_ids, generation_config.max_length, generation_config.pad_token_id, generation_config.eos_token_id, prng_key, logits_warper=logits_warper, logits_processor=logits_processor, trace=trace, params=params, model_kwargs=model_kwargs, ) elif not generation_config.do_sample and generation_config.num_beams > 1: # broadcast input_ids & encoder_outputs input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams) if "encoder_outputs" in model_kwargs: model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams( model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams ) for kwarg in ["attention_mask", "decoder_attention_mask"]: if kwarg in model_kwargs: model_kwargs[kwarg] = self._expand_to_num_beams( model_kwargs[kwarg], num_beams=generation_config.num_beams ) return self._beam_search( input_ids, generation_config.max_length, generation_config.pad_token_id, generation_config.eos_token_id, length_penalty=generation_config.length_penalty, early_stopping=generation_config.early_stopping, logits_processor=logits_processor, trace=trace, params=params, num_return_sequences=generation_config.num_return_sequences, model_kwargs=model_kwargs, ) else: raise NotImplementedError("Beam sampling is currently not implemented.") def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList: """ This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`] instances used for multinomial sampling. """ warpers = FlaxLogitsProcessorList() if generation_config.temperature is not None and generation_config.temperature != 1.0: warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature)) if generation_config.top_k is not None and generation_config.top_k != 0: warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1)) if generation_config.top_p is not None and generation_config.top_p < 1.0: warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1)) return warpers def _get_logits_processor( self, generation_config: GenerationConfig, input_ids_seq_length: int, logits_processor: Optional[FlaxLogitsProcessorList], ) -> FlaxLogitsProcessorList: """ This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`] instances used to modify the scores of the language model head. 
""" processors = FlaxLogitsProcessorList() if ( generation_config.min_length is not None and generation_config.eos_token_id is not None and generation_config.min_length > -1 ): processors.append( FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id) ) if generation_config.forced_bos_token_id is not None: processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) if generation_config.forced_eos_token_id is not None: processors.append( FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) ) if generation_config.suppress_tokens is not None: processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) if generation_config.begin_suppress_tokens is not None: begin_index = input_ids_seq_length begin_index = ( begin_index if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) else begin_index + 1 ) if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0: # generation starts after the last token that is forced begin_index += generation_config.forced_decoder_ids[-1][0] processors.append( FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) ) if generation_config.forced_decoder_ids is not None: forced_decoder_ids = [ [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids ] processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) return processors def _merge_criteria_processor_list( self, default_list: FlaxLogitsProcessorList, custom_list: FlaxLogitsProcessorList, ) -> FlaxLogitsProcessorList: if len(custom_list) == 0: return default_list for default in default_list: for custom in custom_list: if type(custom) is type(default): object_type = "logits processor" raise ValueError( f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" f" `generate`, but it has already been created with the values {default}. {default} has been" " created by passing the corresponding arguments to generate or by the model's config default" f" values. If you just want to change the default values of {object_type} consider passing" f" them as arguments to `generate` instead of using a custom {object_type}." ) default_list.extend(custom_list) return default_list def _greedy_search( self, input_ids: None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ): # init values max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id batch_size, cur_len = input_ids.shape eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) cur_len = jnp.array(cur_len) # per batch-item holding current token in loop. 
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) # per batch-item state bit indicating if sentence has finished. is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. model = self.decode if self.config.is_encoder_decoder else self # initialize model specific kwargs model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) # initialize state state = GreedyState( cur_len=cur_len, sequences=sequences, running_token=input_ids, is_sent_finished=is_sent_finished, model_kwargs=model_kwargs, ) def greedy_search_cond_fn(state): """state termination condition fn.""" has_reached_max_length = state.cur_len == max_length all_sequence_finished = jnp.all(state.is_sent_finished) finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) return ~finish_generation def greedy_search_body_fn(state): """state update fn.""" model_outputs = model(state.running_token, params=params, **state.model_kwargs) logits = model_outputs.logits[:, -1] # apply min_length, ... logits = logits_processor(state.sequences, logits, state.cur_len) next_token = jnp.argmax(logits, axis=-1) next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) next_token = next_token[:, None] next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) return GreedyState( cur_len=state.cur_len + 1, sequences=next_sequences, running_token=next_token, is_sent_finished=next_is_sent_finished, model_kwargs=next_model_kwargs, ) # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU if input_ids.shape[1] > 1: state = greedy_search_body_fn(state) if not trace: state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state) else: state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state) return FlaxGreedySearchOutput(sequences=state.sequences) def _sample( self, input_ids: None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, prng_key: Optional[jnp.ndarray] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, logits_warper: Optional[FlaxLogitsProcessorList] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ): # init values max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) batch_size, cur_len = input_ids.shape eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) cur_len = jnp.array(cur_len) # per batch-item holding current token in loop. 
sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) # per batch-item state bit indicating if sentence has finished. is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. model = self.decode if self.config.is_encoder_decoder else self # initialize model specific kwargs model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) # initialize state state = SampleState( cur_len=cur_len, sequences=sequences, running_token=input_ids, is_sent_finished=is_sent_finished, prng_key=prng_key, model_kwargs=model_kwargs, ) def sample_search_cond_fn(state): """state termination condition fn.""" has_reached_max_length = state.cur_len == max_length all_sequence_finished = jnp.all(state.is_sent_finished) finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) return ~finish_generation def sample_search_body_fn(state): """state update fn.""" prng_key, prng_key_next = jax.random.split(state.prng_key) model_outputs = model(state.running_token, params=params, **state.model_kwargs) logits = model_outputs.logits[:, -1] # apply min_length, ... logits = logits_processor(state.sequences, logits, state.cur_len) # apply top_p, top_k, temperature logits = logits_warper(logits, logits, state.cur_len) next_token = jax.random.categorical(prng_key, logits, axis=-1) next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) next_token = next_token[:, None] next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) return SampleState( cur_len=state.cur_len + 1, sequences=next_sequences, running_token=next_token, is_sent_finished=next_is_sent_finished, model_kwargs=next_model_kwargs, prng_key=prng_key_next, ) # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU if input_ids.shape[1] > 1: state = sample_search_body_fn(state) if not trace: state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state) else: state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state) return FlaxSampleOutput(sequences=state.sequences) def _beam_search( self, input_ids: None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, early_stopping: Optional[Union[bool, str]] = None, logits_processor: Optional[FlaxLogitsProcessorList] = None, trace: bool = True, params: Optional[Dict[str, jnp.ndarray]] = None, num_return_sequences: Optional[int] = None, model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, ): """ This beam search function is heavily inspired by Flax's official example: https://github.com/google/flax/blob/main/examples/wmt/decode.py """ def flatten_beam_dim(tensor): """Flattens the first two dimensions of a non-scalar array.""" # ignore scalars (e.g. 
cache index) if tensor.ndim == 0: return tensor return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:]) def unflatten_beam_dim(tensor, batch_size, num_beams): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" # ignore scalars (e.g. cache index) if tensor.ndim == 0: return tensor return tensor.reshape((batch_size, num_beams) + tensor.shape[1:]) def gather_beams(nested, beam_indices, batch_size, new_num_beams): """ Gathers the beam slices indexed by beam_indices into new beam array. """ batch_indices = jnp.reshape( jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams) ) def gather_fn(tensor): # ignore scalars (e.g. cache index) if tensor.ndim == 0: return tensor else: return tensor[batch_indices, beam_indices] return jax.tree_util.tree_map(gather_fn, nested) # init values max_length = max_length if max_length is not None else self.generation_config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences ) batch_size, num_beams, cur_len = input_ids.shape eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) cur_len = jnp.array(cur_len) # record the prompt length of decoder decoder_prompt_len = input_ids.shape[-1] # per batch,beam-item holding current token in loop. sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0)) # per batch,beam-item state bit indicating if sentence has finished. is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_) # per batch,beam-item score, logprobs running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1]) scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7) # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. model = self.decode if self.config.is_encoder_decoder else self # flatten beam dim if "encoder_outputs" in model_kwargs: model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( model_kwargs["encoder_outputs"]["last_hidden_state"] ) for kwarg in ["attention_mask", "decoder_attention_mask"]: if kwarg in model_kwargs: model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg]) # initialize model specific kwargs model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs) # initialize state state = BeamSearchState( cur_len=cur_len, running_sequences=running_sequences, running_scores=running_scores, sequences=sequences, scores=scores, is_sent_finished=is_sent_finished, model_kwargs=model_kwargs, ) def beam_search_cond_fn(state): """beam search state termination condition fn.""" # 1. is less than max length? 
not_max_length_yet = state.cur_len < max_length # 2. can the new beams still improve? # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion # below for more details. # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. if early_stopping == "never" and length_penalty > 0.0: best_running_score = state.running_scores[:, :1] / ( (max_length - decoder_prompt_len) ** length_penalty ) else: best_running_score = state.running_scores[:, :1] / ( (state.cur_len - decoder_prompt_len) ** length_penalty ) worst_finished_score = jnp.where( state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7) ) improvement_still_possible = jnp.any(best_running_score > worst_finished_score) # 3. is there still a beam that has not finished? still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True)) return not_max_length_yet & still_open_beam & improvement_still_possible def beam_search_body_fn(state, input_ids_length=1): """beam search state update fn.""" # 1. Forward current tokens # Collect the current position slice along length to feed the fast # autoregressive decoder model. Flatten the beam dimension into batch # dimension for feeding into the model. # unflatten beam dimension # Unflatten beam dimension in attention cache arrays input_token = flatten_beam_dim( lax.dynamic_slice( state.running_sequences, (0, 0, state.cur_len - input_ids_length), (batch_size, num_beams, input_ids_length), ) ) model_outputs = model(input_token, params=params, **state.model_kwargs) logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams) cache = jax.tree_util.tree_map( lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values ) # adapt logits for FlaxMarianMTModel logits = self._adapt_logits_for_beam_search(logits) # 2. Compute log probs # get log probabilities from logits, # process logits with processors (*e.g.* min_length, ...), and # add new logprobs to existing running logprobs scores. log_probs = jax.nn.log_softmax(logits) log_probs = logits_processor( flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len ) log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams) log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2) vocab_size = log_probs.shape[2] log_probs = log_probs.reshape((batch_size, num_beams * vocab_size)) # 3. Retrieve top-K # Each item in batch has num_beams * vocab_size candidate sequences. # For each item, get the top 2*k candidates with the highest log- # probabilities. We gather the top 2*K beams here so that even if the best # K sequences reach EOS simultaneously, we have another K sequences # remaining to continue the live beam search. # Gather the top 2*K scores from _all_ beams. # Gather 2*k top beams. # Recover the beam index by floor division. # Recover token id by modulo division and expand Id array for broadcasting. # Update sequences for the 2*K top-k new sequences. 
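# (Editor's note, not in the original source) A small worked illustration of the step below under toy sizes: with num_beams=2 and vocab_size=4, `log_probs` has shape (batch_size, 8) after the reshape above; `lax.top_k` keeps beams_to_keep = 2 * num_beams = 4 candidates per batch item, `topk_indices // vocab_size` recovers which of the 2 running beams each candidate extends, and `topk_indices % vocab_size` recovers its token id, so even if the best 2 candidates emit EOS at the same step there are still 2 live candidates left to continue the search. 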
beams_to_keep = 2 * num_beams topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep) topk_beam_indices = topk_indices // vocab_size topk_running_sequences = gather_beams( state.running_sequences, topk_beam_indices, batch_size, beams_to_keep ) topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2) topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len)) # 4. Check which sequences have ended # Update current sequences: # Did any of these sequences reach an end marker? # To prevent these just finished sequences from being added to the current sequences # set of active beam search sequences, set their log probs to a very large # negative value. did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7) # 5. Get running sequences scores for next # Determine the top k beam indices (from top 2*k beams) from log probs # and gather top k beams (from top 2*k beams). next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1] next_running_sequences, next_running_scores = gather_beams( [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams ) # 6. Process topk logits # Further process log probs: # - add length penalty # - make sure no scores can be added anymore if beam is full # - make sure still running sequences cannot be chosen as finalized beam topk_log_probs = topk_log_probs / ((state.cur_len + 1 - decoder_prompt_len) ** length_penalty) beams_in_batch_are_full = jnp.broadcast_to( state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape ) & (early_stopping is True) add_penalty = ~did_topk_just_finished | beams_in_batch_are_full topk_log_probs += add_penalty * np.array(-1.0e7) # 7. Get scores, sequences, is sentence finished for next. # Combine sequences, scores, and flags along the beam dimension and compare # new finished sequence scores to existing finished scores and select the # best from the new set of beams merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1) merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1) merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1) topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1] next_sequences, next_scores, next_is_sent_finished = gather_beams( [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams ) # 8. Update model kwargs. # Determine the top k beam indices from the original set of all beams. # With these, gather the top k beam-associated caches. next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams) next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams) model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache) next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) return BeamSearchState( cur_len=state.cur_len + 1, running_scores=next_running_scores, running_sequences=next_running_sequences, scores=next_scores, sequences=next_sequences, is_sent_finished=next_is_sent_finished, model_kwargs=next_model_kwargs, ) # Always run first iteration outside of `lax.while_loop` to avoid calling `beam_search_cond_fn` # when `state.cur_len` equals `decoder_prompt_len`. This also helps to comply with TPU when # the very first prompt has sequence length > 1. 
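# (Editor's note, not in the original source) `lax.while_loop` requires every iteration to consume arrays of the same shape, so inside the traced loop `beam_search_body_fn` always feeds a single new token (input_ids_length=1); the call below goes through `partial` with input_ids_length set to the full prompt length, so the whole prompt is consumed in one unrolled step before the shape-stable loop takes over. 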
state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state) if not trace: state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state) else: state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state) # Account for the edge-case where there are no finished sequences for a # particular batch item. If so, return running sequences for that batch item. none_finished = jnp.any(state.is_sent_finished, axis=1) sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences) scores = jnp.where(none_finished[:, None], state.scores, state.running_scores) # Take best beams for each batch (the score is sorted in descending order) sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) scores = flatten_beam_dim(scores[:, :num_return_sequences]) return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
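# --- Editor's addition (not part of the original module): a minimal, hedged usage sketch of `FlaxGenerationMixin.generate`. It assumes a Flax causal-LM checkpoint such as "gpt2" is available and loadable via FlaxAutoModelForCausalLM; the checkpoint name, `top_k` value, and generation lengths are illustrative assumptions, not prescriptions. Kept as comments so the module's import-time behavior is unchanged. # from transformers import AutoTokenizer, FlaxAutoModelForCausalLM # import jax # tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left") # model = FlaxAutoModelForCausalLM.from_pretrained("gpt2") # inputs = tokenizer(["Deep learning is"], return_tensors="np") # # greedy decoding: num_beams=1 and do_sample=False # greedy_out = model.generate(inputs["input_ids"], max_new_tokens=20) # # multinomial sampling: do_sample=True, with an explicit PRNG key for reproducibility # sample_out = model.generate(inputs["input_ids"], do_sample=True, top_k=50, max_new_tokens=20, prng_key=jax.random.PRNGKey(0)) # # beam search: num_beams>1 and do_sample=False # beam_out = model.generate(inputs["input_ids"], num_beams=4, max_new_tokens=20) # print(tokenizer.batch_decode(greedy_out.sequences, skip_special_tokens=True)) 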
transformers/src/transformers/generation/flax_utils.py/0
{ "file_path": "transformers/src/transformers/generation/flax_utils.py", "repo_id": "transformers", "token_count": 21734 }
299