Dataset schema: text (string, 29-317k chars), id (string, 22-166 chars), metadata (dict), __index_level_0__ (int64, 0-231)
# coding=utf-8 # Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen2.5-VL model.""" import gc import unittest import requests from transformers import ( AutoProcessor, Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, ) if is_torch_available(): import torch if is_vision_available(): from PIL import Image class Qwen2_5_VLVisionText2TextModelTester: def __init__( self, parent, batch_size=3, seq_length=7, num_channels=3, ignore_index=-100, image_size=14, bos_token_id=0, eos_token_id=1, pad_token_id=2, vision_start_token_id=3, image_token_id=4, video_token_id=5, hidden_act="silu", hidden_size=32, vocab_size=99, intermediate_size=37, max_position_embeddings=512, max_window_layers=3, model_type="qwen2_5_vl", num_attention_heads=4, num_hidden_layers=4, num_key_value_heads=2, rope_theta=10000, tie_word_embeddings=True, is_training=True, vision_config={ "depth": 2, "in_chans": 3, "hidden_act": "silu", "intermediate_size": 32, "out_hidden_size": 32, "hidden_size": 32, "num_heads": 4, "patch_size": 14, "spatial_patch_size": 14, "spatial_merge_size": 1, "temporal_patch_size": 2, }, rope_scaling={"type": "mrope", "mrope_section": [2, 1, 1]}, ): self.parent = parent self.ignore_index = ignore_index self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.vision_start_token_id = vision_start_token_id self.image_token_id = image_token_id self.video_token_id = video_token_id self.hidden_act = hidden_act self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.max_position_embeddings = max_position_embeddings self.max_window_layers = max_window_layers self.model_type = model_type self.num_attention_heads = num_attention_heads self.num_hidden_layers = num_hidden_layers self.num_key_value_heads = num_key_value_heads self.rope_theta = rope_theta self.tie_word_embeddings = tie_word_embeddings self.vision_config = vision_config self.rope_scaling = rope_scaling self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.is_training = is_training self.vocab_size = vocab_size self.num_image_tokens = 32 self.seq_length = seq_length + self.num_image_tokens def get_config(self): return Qwen2_5_VLConfig( hidden_size=self.hidden_size, intermediate_size=self.intermediate_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, hidden_act=self.hidden_act, max_position_embeddings=self.max_position_embeddings,
vision_config=self.vision_config, model_type=self.model_type, max_window_layers=self.max_window_layers, rope_scaling=self.rope_scaling, tie_word_embeddings=self.tie_word_embeddings, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, vision_start_token_id=self.vision_start_token_id, image_token_id=self.image_token_id, video_token_id=self.video_token_id, vocab_size=self.vocab_size, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vision_config.patch_size temporal_patch_size = config.vision_config.temporal_patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2) * temporal_patch_size, ] ) return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) input_ids[:, -1] = self.pad_token_id input_ids[input_ids == self.video_token_id] = self.pad_token_id input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[:, self.num_image_tokens] = self.image_token_id labels = torch.zeros( (self.batch_size, self.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict = { "pixel_values": pixel_values, "image_grid_thw": torch.tensor([[1, 1, 1]] * self.batch_size), "input_ids": input_ids, "attention_mask": attention_mask, "labels": labels, } return config, inputs_dict def create_and_check_qwen2_5_vl_model_fp16_forward( self, config, input_ids, pixel_values, attention_mask, image_grid_thw ): model = Qwen2_5_VLForConditionalGeneration(config=config) model.to(torch_device) model.half() model.eval() logits = model( input_ids=input_ids, attention_mask=attention_mask, image_grid_thw=image_grid_thw, pixel_values=pixel_values.to(torch.float16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) def create_and_check_qwen2_5_vl_model_fp16_autocast_forward( self, config, input_ids, pixel_values, attention_mask, image_grid_thw ): config.torch_dtype = torch.float16 model = Qwen2_5_VLForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type="cuda", dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, image_grid_thw=image_grid_thw, pixel_values=pixel_values.to(torch.float16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class Qwen2_5_VLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Qwen2_5_VLForConditionalGeneration`.
""" all_model_classes = (Qwen2_5_VLForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (Qwen2_5_VLForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_head_masking = False def setUp(self): self.model_tester = Qwen2_5_VLVisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2_5_VLConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_mismatching_num_image_tokens(self): """ Tests that VLMs through an error with explicit message saying what is wrong when number of images don't match number of image tokens in the text. Also we need to test multi-image cases when one prompr has multiple image tokens. """ config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) _ = model(**input_dict) # successfull forward with no modifications # remove one image but leave the image token in text patch_size = config.vision_config.patch_size one_img_length = (self.model_tester.image_size**2) // (patch_size**2) input_dict["pixel_values"] = input_dict["pixel_values"][-one_img_length:, ...] input_dict["image_grid_thw"] = input_dict["image_grid_thw"][-1:, ...] with self.assertRaises(ValueError): _ = model(**input_dict) # simulate multi-image case by concatenating inputs where each has exactly one image/image-token input_ids = input_dict["input_ids"][:1] pixel_values = input_dict["pixel_values"][:one_img_length] image_grid_thw = input_dict["image_grid_thw"][:1] input_ids = torch.cat([input_ids, input_ids], dim=0) # one image and two image tokens raise an error with self.assertRaises(ValueError): _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) # two images and two image tokens don't raise an error pixel_values = torch.cat([pixel_values, pixel_values], dim=0) image_grid_thw = torch.cat([image_grid_thw, image_grid_thw], dim=0) _ = model( input_ids=input_ids, pixel_values=pixel_values, image_grid_thw=image_grid_thw, ) @unittest.skip(reason="Feedforward chunking is not yet supported") def test_feed_forward_chunking(self): pass @unittest.skip(reason="CPU offload is not yet supported") def test_cpu_offload(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_bin(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. Skip for now.") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Some undefined behavior encountered with test versions of this model. 
Skip for now.") def test_model_parallelism(self): pass @unittest.skip(reason="Compile not yet supported because in Qwen2_5_VL models") def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Compile not yet supported because in Qwen2_5_VL models") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.") def test_multi_gpu_data_parallel_forward(self): pass @unittest.skip(reason="We cannot configure to output a smaller model.") def test_model_is_small(self): pass @unittest.skip( reason="Qwen2.5-VL can't do low-memory generation because position IDs have extra dimension and split function doesn't work for that" ) def test_beam_search_low_memory(self): pass @unittest.skip( reason="VLMs can't generate from inputs embeds and pixels. This can be tested as part of bacbone LM, no need to run the tes for VLMs" ) def test_generate_from_inputs_embeds_with_static_cache(self): pass @unittest.skip(reason="Can't compile fullgraph due to dynamic control flow in `prepare_inputs_for_generate`") def test_generate_compile_fullgraph(self): pass @require_torch class Qwen2_5_VLIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct") self.messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What kind of dog is this?"}, ], } ] url = "https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg" self.image = Image.open(requests.get(url, stream=True).raw) def tearDown(self): gc.collect() torch.cuda.empty_cache() @slow def test_small_model_integration_test(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text], images=[self.image], return_tensors="pt") expected_input_ids = [151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 151652, 151655, 151655] # fmt: skip assert torch.allclose(expected_input_ids, inputs.input_ids[0].tolist()[:17], atol=3e-3) expected_pixel_slice = torch.tensor( [ [0.8792, 0.8792, 0.9084], [1.1858, 1.1858, 1.2296], [1.2004, 1.2004, 1.2150], [1.4340, 1.4340, 1.4194], [1.3902, 1.4048, 1.4194], [1.5216, 1.5362, 1.5362], ], dtype=torch.float32, device="cpu", ) assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3) # verify generation inputs = inputs.to(torch_device) output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and intelligent nature, making them popular pets" self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular choices', 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_wo_image(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ 'system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets', 'system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am Qwen, a large language model created by Alibaba Cloud. I am designed to assist with various tasks and answer questions to the best of my' ] # fmt: skip self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch_different_resolutions(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype="auto", device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) text2 = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) image2 = self.image.resize((224, 224)) inputs = self.processor( text=[text, text2], images=[self.image, image2], padding=True, return_tensors="pt", ).to(torch_device) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. 
Labradors are known for their friendly and intelligent nature, making them popular pets", "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow @require_flash_attn @require_torch_gpu def test_small_model_integration_test_batch_flashatt2(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text], images=[self.image, self.image], return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True)[0], self.processor.batch_decode(output, skip_special_tokens=True)[1], ) @slow @require_flash_attn @require_torch_gpu def test_small_model_integration_test_batch_wo_image_flashatt2(self): model = Qwen2_5_VLForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-VL-7B-Instruct", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) messages2 = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Who are you?"}, ] text2 = self.processor.apply_chat_template(messages2, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=[text, text2], images=[self.image], padding=True, return_tensors="pt").to( torch_device ) # it should not matter whether two images are the same size or not output = model.generate(**inputs, max_new_tokens=30) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat kind of dog is this?\nassistant\nThe dog in the picture appears to be a Labrador Retriever. Labradors are known for their friendly and intelligent nature, making them popular pets", "system\nYou are a helpful assistant.\nuser\nWho are you?\nassistant\nI am Qwen, a large language model created by Alibaba Cloud. I am designed to answer a wide range of questions and provide information on various topics", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, )
transformers/tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py/0
{ "file_path": "transformers/tests/models/qwen2_5_vl/test_modeling_qwen2_5_vl.py", "repo_id": "transformers", "token_count": 10316 }
203
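Aside on the record above: `Qwen2_5_VLVisionText2TextModelTester.prepare_config_and_inputs` builds `pixel_values` as a flat patch table rather than a 4-D image batch. A minimal sketch of that shape arithmetic, using only the tester defaults quoted above (illustrative bookkeeping, not transformers API):

# Dummy pixel_values shape used by the Qwen2.5-VL model tester above.
batch_size = 3
num_channels = 3
image_size = 14           # one 14x14 image...
patch_size = 14           # ...at patch_size 14 is exactly one spatial patch
temporal_patch_size = 2

patches_per_image = (image_size**2) // (patch_size**2)            # 1
total_patches = batch_size * patches_per_image                    # 3 -> first dim
patch_dim = num_channels * (patch_size**2) * temporal_patch_size  # 1176 -> second dim

# floats_tensor([total_patches, patch_dim]) is a (3, 1176) patch table, which is
# why image_grid_thw is [[1, 1, 1]] per sample (1 temporal x 1 x 1 spatial patches).
print(total_patches, patch_dim)  # 3 1176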
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the Tensorflow ResNet model.""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class TFResNetModelTester: def __init__( self, parent, batch_size=3, image_size=32, num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1], is_training=True, use_labels=True, hidden_act="relu", num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.embeddings_size = embeddings_size self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.num_labels = num_labels self.scope = scope self.num_stages = len(hidden_sizes) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return ResNetConfig( num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, ) def create_and_check_model(self, config, pixel_values, labels): model = TFResNetModel(config=config) result = model(pixel_values) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.num_labels model = TFResNetForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as 
ResNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () pipeline_model_mapping = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False has_attentions = False def setUp(self): self.model_tester = TFResNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="ResNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="ResNet does not support input and output embeddings") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() layers_type = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: config.layer_type = layer_type inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "microsoft/resnet-50" model = TFResNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf 
@require_vision class TFResNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("microsoft/resnet-50") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="tf") # forward pass outputs = model(**inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-11.1069, -9.7877, -8.3777]) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
transformers/tests/models/resnet/test_modeling_tf_resnet.py/0
{ "file_path": "transformers/tests/models/resnet/test_modeling_tf_resnet.py", "repo_id": "transformers", "token_count": 3760 }
204
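Aside on the record above: the ResNet tests hard-code the `image_size // 4` and `image_size // 32` spatial ratios. A small sketch of where those come from, assuming the standard ResNet layout (stride-2 stem convolution plus stride-2 pooling, then one stride-2 transition opening each later stage); the helper name is ours, not part of transformers:

def resnet_feature_sizes(image_size: int, num_stages: int = 4) -> list:
    # Stem halves the resolution twice: conv stride 2, then max-pool stride 2.
    size = image_size // 4
    sizes = [size]
    for _ in range(num_stages - 1):
        size //= 2  # each subsequent stage opens with a stride-2 block
        sizes.append(size)
    return sizes

print(resnet_feature_sizes(32))   # [8, 4, 2, 1]   -> first map 32 // 4, last 32 // 32
print(resnet_feature_sizes(224))  # [56, 28, 14, 7]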
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class TFRoFormerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = RoFormerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFRoFormerForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFRoFormerForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFRoFormerForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFRoFormerForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: add `prepare_inputs_for_generation` for `TFRoFormerForCausalLM` def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if pipeline_test_case_name == "TextGenerationPipelineTests": return True return False def setUp(self): self.model_tester = TFRoFormerModelTester(self) self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base") self.assertIsNotNone(model) @require_tf class TFRoFormerModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 expected_shape = [1, 6, 
vocab_size] self.assertEqual(output.shape, expected_shape) expected_slice = tf.constant( [ [ [-0.12053341, -1.0264901, 0.29221946], [-1.5133783, 0.197433, 0.15190607], [-5.0135403, -3.900256, -0.84038764], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) @require_tf class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_basic(self): input_ids = tf.constant([[4, 10]]) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6) emb = emb1(input_ids.shape) desired_weights = tf.constant( [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] ) tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance) def test_positional_emb_weights_against_roformer(self): desired_weights = tf.constant( [ [0.0000, 0.0000, 0.0000, 0.0000, 0.0000], [0.8415, 0.8219, 0.8020, 0.7819, 0.7617], [0.9093, 0.9364, 0.9581, 0.9749, 0.9870], ] ) emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512) emb1([2, 16, 512]) weights = emb1.weight[:3, :5] tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance) @require_tf class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase): tolerance = 1e-4 def test_apply_rotary_position_embeddings(self): # 2,12,16,64 query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100 embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64) sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :] query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings( sinusoidal_pos, query_layer, key_layer ) desired_query_layer = tf.constant( [ [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700], [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343], [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985], [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871], [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980], [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253], ] ) desired_key_layer = tf.constant( [ [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700], [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343], [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985], [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871], [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980], [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance) tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
transformers/tests/models/roformer/test_modeling_tf_roformer.py/0
{ "file_path": "transformers/tests/models/roformer/test_modeling_tf_roformer.py", "repo_id": "transformers", "token_count": 7875 }
205
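Aside on the record above: `test_apply_rotary_position_embeddings` checks RoFormer's rotary scheme against hard-coded tables. A NumPy re-derivation of the same sinusoidal table and interleaved pairwise rotation, for illustration only (it mirrors, but is not, `TFRoFormerSelfAttention.apply_rotary_position_embeddings`); rows 0-1 of the table reproduce `desired_weights` from `test_basic`, and the printed pair matches `desired_query_layer`:

import numpy as np

def sinusoidal_positions(num_positions: int, dim: int) -> np.ndarray:
    # Row p is [sin(p * f_0) ... sin(p * f_{dim/2-1}), cos(...)], f_i = 10000**(-2i/dim).
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))
    angles = np.arange(num_positions)[:, None] * inv_freq[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)

def apply_rotary(x: np.ndarray, sinusoidal: np.ndarray) -> np.ndarray:
    # Rotate each (even, odd) feature pair of x by its per-position angle.
    sin, cos = np.split(sinusoidal, 2, axis=-1)
    sin = np.repeat(sin, 2, axis=-1)  # align sin/cos with the interleaved pairs
    cos = np.repeat(cos, 2, axis=-1)
    rotate_half = np.stack([-x[..., 1::2], x[..., ::2]], axis=-1).reshape(x.shape)
    return x * cos + rotate_half * sin

seq_len, head_dim = 16, 64
q = np.arange(seq_len * head_dim, dtype=np.float32).reshape(seq_len, head_dim) / 100
q_rot = apply_rotary(q, sinusoidal_positions(seq_len, head_dim))
print(np.round(q_rot[1, :2], 4))  # [-0.2012  0.8897], as in desired_query_layer row 1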
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, PreTrainedTokenizerFast, SeamlessM4TTokenizer, SeamlessM4TTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right EN_CODE = 256047 RO_CODE = 256145 SMALL_TRAINING_CORPUS = [ ["This is the first sentence.", "This is the second one."], ["This sentence (contains #) over symbols and numbers 12 3.", "But not this one."], ] @require_sentencepiece @require_tokenizers class SeamlessM4TTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "facebook/hf-seamless-m4t-medium" tokenizer_class = SeamlessM4TTokenizer rust_tokenizer_class = SeamlessM4TTokenizerFast test_rust_tokenizer = True test_sentencepiece = True from_pretrained_kwargs = {} def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_tokenizer(self): tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @unittest.skip(reason="This fails currently and is a blocker. 
No idea why TODO @ylacombe") def test_maximum_encoding_length_single_input(self): tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): seq_0, ids = self.get_clean_sequence(tokenizer, max_length=20) sequence = tokenizer.encode(seq_0, add_special_tokens=False) total_length = len(sequence) self.assertGreater( total_length, 4, "Issue with the testing sequence, please update it, it's too short" ) # Test with max model input length model_max_length = tokenizer.model_max_length self.assertEqual(model_max_length, 100) seq_1 = seq_0 * model_max_length sequence1 = tokenizer(seq_1, add_special_tokens=False) total_length1 = len(sequence1["input_ids"]) self.assertGreater( total_length1, model_max_length, "Issue with the testing sequence, please update it, it's too short", ) # Simple padding_strategies = ( [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] ) for padding_state in padding_strategies: with self.subTest(f"Padding: {padding_state}"): for truncation_state in [True, "longest_first", "only_first"]: with self.subTest(f"Truncation: {truncation_state}"): output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"]), model_max_length) output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state) self.assertEqual(len(output["input_ids"][0]), model_max_length) # Simple with no truncation # Reset warnings tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer(seq_1, padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) tokenizer.deprecation_warnings = {} with self.assertLogs("transformers", level="WARNING") as cm: output = tokenizer([seq_1], padding=padding_state, truncation=False) self.assertNotEqual(len(output["input_ids"][0]), model_max_length) self.assertEqual(len(cm.records), 1) self.assertTrue( cm.records[0].message.startswith( "Token indices sequence length is longer than the specified maximum sequence length" " for this model" ) ) # Overflowing tokens stride = 2 # modify padding because it's activated by default in seamlessM4T information = tokenizer( seq_0, max_length=total_length - 2, add_special_tokens=False, stride=stride, truncation="longest_first", return_overflowing_tokens=True, padding=False, # add_prefix_space=False, ) # Overflowing tokens are handled quite differently in slow and fast tokenizers if isinstance(tokenizer, PreTrainedTokenizerFast): truncated_sequence = information["input_ids"][0] overflowing_tokens = information["input_ids"][1] self.assertEqual(len(information["input_ids"]), 2) self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) else: truncated_sequence = information["input_ids"] overflowing_tokens = information["overflowing_tokens"] self.assertEqual(len(truncated_sequence), total_length - 2) self.assertEqual(truncated_sequence, sequence[:-2]) self.assertEqual(len(overflowing_tokens), 2 + stride) self.assertEqual(overflowing_tokens, sequence[-(2 + stride) :]) 
@unittest.skip(reason="By defaults, uses pad_to_multiple_of which breaks the test") def test_maximum_encoding_length_pair_input(self): pass def test_padding_to_multiple_of(self): tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): if tokenizer.pad_token is None: self.skipTest(reason="No padding token.") else: empty_tokens = tokenizer("", padding=True, pad_to_multiple_of=8) normal_tokens = tokenizer("This is a sample input", padding=True, pad_to_multiple_of=8) for key, value in empty_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # default to padding=True so need to precise which padding is called normal_tokens = tokenizer("This", pad_to_multiple_of=8, padding=False) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = tokenizer("This", padding=True, truncation=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, tokenizer.__call__, "This", padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) @require_torch def test_prepare_seq2seq_batch(self): if not self.test_seq2seq: self.skipTest(reason="test_seq2seq is set to False") tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Longer text that will definitely require truncation. 
src_text = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for" " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons" " will only worsen the violence and misery for millions of people.", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al" ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] try: batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng", tgt_lang="ron", pad_to_multiple_of=None, ) except NotImplementedError: self.skipTest(reason="Encountered NotImplementedError when calling prepare_seq2seq_batch") self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.labels.shape[1], 10) # TODO: not working for tgt_text # max_target_length will default to max_length if not specified batch = tokenizer.prepare_seq2seq_batch( src_texts=src_text, tgt_texts=tgt_text, max_length=4, return_tensors="pt", pad_to_multiple_of=None, ) self.assertEqual(batch.input_ids.shape[1], 4) self.assertEqual(batch.labels.shape[1], 4) batch_encoder_only = tokenizer.prepare_seq2seq_batch( src_texts=src_text, max_length=4, max_target_length=10, return_tensors="pt", pad_to_multiple_of=None, ) self.assertEqual(batch_encoder_only.input_ids.shape[1], 4) self.assertEqual(batch_encoder_only.attention_mask.shape[1], 4) self.assertNotIn("decoder_input_ids", batch_encoder_only) @unittest.skip(reason="Unfortunately way too slow to build a BPE with SentencePiece.") def test_save_slow_from_fast_and_reload_fast(self): pass # Copied from tests.models.nllb.test_tokenization_nllb.NllbTokenizationTest.test_special_tokens_initialization def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) r_output = tokenizer_r.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertTrue(special_token_id in r_output) if self.test_slow_tokenizer: tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, # , from_slow=True <- unfortunately too slow to convert ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in cr_output) @unittest.skip( "encode_plus and batch_encode_plus are deprecated and __call__ do some processing, so we expect different results." 
) def test_call(self): pass def test_training_new_tokenizer(self): # This feature only exists for fast tokenizers if not self.test_rust_tokenizer: self.skipTest(reason="test_rust_tokenizer is set to False") tokenizer = self.get_rust_tokenizer() new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) # Test we can use the new tokenizer with something not seen during training inputs = new_tokenizer(["This is the first sentence", "This sentence is different 🤗."]) self.assertEqual(len(inputs["input_ids"]), 2) decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) expected_result = "This is the first sentence" if tokenizer.backend_tokenizer.normalizer is not None: expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) self.assertEqual(expected_result, decoded_input) # We check that the parameters of the tokenizer remained the same # Check we have the same number of added_tokens for both pair and non-pair inputs. # make sure it has the same prefix tokens first new_tokenizer.tgt_lang = tokenizer.tgt_lang tokenizer.tgt_lang = tokenizer.tgt_lang self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) # Check we have the correct max_length for both pair and non-pair inputs. self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) # Assert the set of special tokens match as we didn't ask to change them self.assertSequenceEqual( tokenizer.all_special_tokens_extended, new_tokenizer.all_special_tokens_extended, ) self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) @unittest.skip(reason="Fails because of the hack of adding <unk> in _tokenize") def test_pickle_subword_regularization_tokenizer(self): pass @unittest.skip(reason="Fails because of the hack of adding <unk> in _tokenize") def test_subword_regularization_tokenizer(self): pass @require_torch @require_sentencepiece @require_tokenizers class SeamlessM4TDistilledIntegrationTest(unittest.TestCase): checkpoint_name = "facebook/hf-seamless-m4t-medium" src_text = [ " UN Chief Says There Is No Military Solution in Syria", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""", ] tgt_text = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] expected_src_tokens = [256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 3] # fmt: skip @classmethod def setUpClass(cls): cls.tokenizer: SeamlessM4TTokenizer = SeamlessM4TTokenizer.from_pretrained( cls.checkpoint_name, src_lang="eng", tgt_lang="ron" ) # cls.pad_token_id = 1 return cls def test_language_codes(self): self.assertEqual(self.tokenizer.convert_tokens_to_ids("__ace_Latn__"), 256002) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__shn__"), 256152) 
self.assertEqual(self.tokenizer.convert_tokens_to_ids("__eng__"), 256047) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__fra__"), 256057) self.assertEqual(self.tokenizer.convert_tokens_to_ids("__quy__"), 256144) def test_tokenizer_tgt_lang(self): ids = self.tokenizer(self.src_text, src_lang="fra").input_ids[0] self.assertListEqual(self.expected_src_tokens[1:], ids[1 : len(self.expected_src_tokens)]) self.assertEqual(256057, ids[0]) rest_ids = ids[len(self.expected_src_tokens) :] self.assertListEqual([0] * len(rest_ids), rest_ids) ids = self.tokenizer(self.src_text, src_lang="__shn__").input_ids[0] self.assertListEqual(self.expected_src_tokens[1:], ids[1 : len(self.expected_src_tokens)]) self.assertEqual(256152, ids[0]) # Copied from tests.models.nllb.test_tokenization_nllb.NllbDistilledIntegrationTest.test_enro_tokenizer_decode_ignores_language_codes def test_enro_tokenizer_decode_ignores_language_codes(self): self.assertIn(RO_CODE, self.tokenizer.all_special_ids) generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: skip result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) self.assertEqual(result, expected_romanian) self.assertNotIn(self.tokenizer.eos_token, result) def test_enro_tokenizer_truncation(self): src_text = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0], str) desired_max_length = 10 ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0] self.assertEqual(ids[-1], 3) self.assertEqual(ids[0], EN_CODE) self.assertEqual(len(ids), desired_max_length) @require_torch def test_enro_tokenizer_prepare_batch(self): batch = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), pad_to_multiple_of=None, return_tensors="pt", ) batch["decoder_input_ids"] = shift_tokens_right( batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.convert_tokens_to_ids("__ron__") ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 15), batch.input_ids.shape) self.assertEqual((2, 15), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, result) self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE]) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id]) def test_seq2seq_max_length(self): batch = self.tokenizer( self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt", pad_to_multiple_of=None ) targets = self.tokenizer( text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt" ) labels = targets["input_ids"] batch["decoder_input_ids"] = shift_tokens_right( labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.convert_tokens_to_ids(self.tokenizer.tgt_lang), ) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def test_tokenizer_translation(self): inputs = self.tokenizer._build_translation_inputs( "A test", return_tensors="pt", src_lang="eng", tgt_lang="fra" ) self.assertEqual( nested_simplify(inputs), { # A, test, EOS, en_XX "input_ids": [[256047, 70, 7356, 3]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 256057, }, ) 
@require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): """ A class that regroups important test to make sure that we properly handle the special tokens. """ @classmethod def setUpClass(cls): tokenizer = SeamlessM4TTokenizer(SAMPLE_VOCAB, extra_ids=0, add_bos_token=False, legacy=False) tokenizer.add_special_tokens({"additional_special_tokens": [AddedToken("<s>", rstrip=False, lstrip=False)]}) cls.tokenizer = tokenizer return cls def test_add_dummy_prefix(self): # make sure `'▁'` is prepended, and outputs match sp_model's # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute input_ids = self.tokenizer.encode(". Hello") self.assertEqual(input_ids, [3, 1, 8, 5, 157, 87, 21, 3]) sp_encode = self.tokenizer.sp_model.encode(". Hello") # [bos, lang_id, _] + offset_sp_encode self.assertEqual(input_ids[:-1], [3, 1, 8] + [i + self.tokenizer.fairseq_offset for i in sp_encode]) tokens = self.tokenizer.tokenize(". Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): # make sure the extra spaces are eaten. Since the sample vocab does not have # `______`. sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute is set to False input_ids = self.tokenizer.encode(" . Hello") self.assertEqual(input_ids, [3, 1, 8, 5, 157, 87, 21, 3]) sp_encode = self.tokenizer.sp_model.encode(" . Hello") self.assertEqual([i - self.tokenizer.fairseq_offset for i in input_ids[2:-1]], [7] + sp_encode) tokens = self.tokenizer.tokenize(" . 
Hello") self.assertEqual(tokens, ["▁", ".", "▁He", "ll", "o"]) # `'▁'` is also a whitespace input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 3]) tokens = self.tokenizer.tokenize("▁He is not") sp_encode = [ self.tokenizer.sp_model.piece_to_id("▁He"), self.tokenizer.sp_model.piece_to_id("▁is"), self.tokenizer.sp_model.piece_to_id("▁not"), ] self.assertEqual([i - self.tokenizer.fairseq_offset for i in input_ids[2:-1]], sp_encode) self.assertEqual(tokens, ["▁He", "▁is", "▁not"]) # no extra space added input_ids = self.tokenizer.encode("▁He is not<s> ▁He") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 2, 157, 3]) tokens = self.tokenizer.tokenize("▁He is not<s> ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "<s>", "▁He"]) # spaces are eaten by spm + our strip # make sure that the output after the extra id is the same as if # extra_id was not there input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [3, 1, 157, 47, 45, 157, 3]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁He", "▁is", "▁not", "▁He"]) # spaces are eaten by spm even if not start def test_character_after_special_token(self): # Make sure that `tokenizer.tokenize` is similar to # adding the equivalent special token to the vocab input_ids = self.tokenizer.encode("Hey <s>I") self.assertEqual(input_ids, [3, 1, 157, 31, 2, 101, 3]) sp_encode = self.tokenizer.sp_model.encode("Hey .I") # the last token besides eos should be 100 offset self.assertEqual(input_ids[-2] - self.tokenizer.fairseq_offset, sp_encode[-1]) tokens = self.tokenizer.tokenize("<s>I") self.assertEqual(tokens, ["<s>", "I"]) input_ids = self.tokenizer.encode("Hello, <s>,") self.assertEqual(input_ids, [3, 1, 157, 87, 21, 4, 2, 4, 3]) tokens = self.tokenizer.tokenize("Hello, <s>,") self.assertEqual(tokens, ["▁He", "ll", "o", ",", "<s>", ","]) def test_special_tokens_strip(self): input_ids = self.tokenizer.encode(" <s> ,") self.assertEqual(input_ids, [3, 1, 2, 8, 4, 3]) tokens = self.tokenizer.tokenize(" <s> ,") # spaces are eaten by rstrip / lstrip + spm sp_model.encode(" ") = [] self.assertEqual(tokens, ["<s>", "▁", ","]) input_ids = self.tokenizer.encode("No <s> ▁He") self.assertEqual(input_ids, [3, 1, 285, 2, 157, 3]) tokens = self.tokenizer.tokenize("No <s> ▁He") self.assertEqual(tokens, ["▁No", "<s>", "▁He"]) # spaces are eaten by rstrip / lstrip
transformers/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py/0
{ "file_path": "transformers/tests/models/seamless_m4t/test_tokenization_seamless_m4t.py", "repo_id": "transformers", "token_count": 15005 }
206
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch SigLIP model.""" import inspect import os import tempfile import unittest from typing import Tuple import numpy as np import requests from parameterized import parameterized from pytest import mark from transformers import SiglipConfig, SiglipTextConfig, SiglipVisionConfig from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_gpu, require_torch_sdpa, require_vision, slow, torch_device, ) from transformers.utils import ( is_torch_available, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, is_torch_sdpa_available, is_vision_available, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, is_flaky, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SiglipForImageClassification, SiglipModel, SiglipTextModel, SiglipVisionModel if is_torch_sdpa_available(): from torch.nn.attention import SDPBackend, sdpa_kernel if is_vision_available(): from PIL import Image from transformers import SiglipProcessor class SiglipModelTesterMixin(ModelTesterMixin): def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) # SigLip has one shared cls attr for all models, so we assign both submodels heer vision_attn = text_attn = "sdpa" if model._supports_sdpa else "eager" if hasattr(model_sdpa, "vision_model") and hasattr(model_sdpa, "text_model"): self.assertTrue(model_sdpa.vision_model.config._attn_implementation == vision_attn) self.assertTrue(model_sdpa.text_model.config._attn_implementation == text_attn) self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") self.assertTrue(model_eager.text_model.config._attn_implementation == "eager") self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") has_sdpa = False for name, submodule in model_sdpa.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or 
"SdpaSelfAttention" in class_name: has_sdpa = True break if not has_sdpa and model_sdpa.config.model_type != "falcon": raise ValueError("The SDPA model should have SDPA attention layers") def test_eager_matches_sdpa_inference( self, torch_dtype: str, use_attention_mask_options: Tuple[bool, ...] = (True, False), logit_keys: Tuple[str, ...] = ("logits_per_image", "logits_per_text", "image_embeds", "text_embeds"), ): if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_dtype == "float16" and not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if torch_dtype == "bfloat16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Convert to torch dtype dtypes = { "float16": torch.float16, "bfloat16": torch.bfloat16, "float32": torch.float32, } torch_dtype = dtypes[torch_dtype] atols = { torch.float32: 1e-5, torch.bfloat16: 3e-2, torch.float16: 5e-3, } rtols = { torch.float32: 1e-4, torch.bfloat16: 3e-2, torch.float16: 5e-3, } atol = atols[torch_dtype] rtol = rtols[torch_dtype] def get_mean_reldiff(msg, current_case, x, ref, atol, rtol): return f"{msg} {current_case}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) # We use these for loops instead of parameterized.expand just for the interest of avoiding loading/saving the model each time, # but it would be nicer to have an efficient way to use parameterized.expand cases = [ (use_mask, output_attentions, sdpa_backend, batch_size) for use_mask in use_attention_mask_options for output_attentions in [True, False] for sdpa_backend in [ SDPBackend.MATH, [SDPBackend.FLASH_ATTENTION, SDPBackend.MATH], [SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH], [SDPBackend.FLASH_ATTENTION, SDPBackend.EFFICIENT_ATTENTION, SDPBackend.MATH], ] for batch_size in [1, 5] ] fail_cases = [] for use_mask, output_attentions, sdpa_backend, batch_size in cases: processed_inputs = inputs_dict.copy() # convert to torch_dtype if "pixel_values" in processed_inputs: processed_inputs["pixel_values"] = processed_inputs["pixel_values"].to(torch_dtype) # slice for different batch sizes for key in ["pixel_values", "input_ids", "attention_mask"]: if key in processed_inputs: processed_inputs[key] = processed_inputs[key][:batch_size] # set attention mask with left padding if not use_mask: processed_inputs.pop("attention_mask", None) else: dummy_attention_mask = processed_inputs["attention_mask"] dummy_attention_mask[:] = 1 dummy_attention_mask[:, :1] = 0 processed_inputs["attention_mask"] = dummy_attention_mask processed_inputs["output_attentions"] = output_attentions processed_inputs["output_hidden_states"] = True current_case = ( 
f"padding_side=left, use_mask={use_mask}, batch_size={batch_size}, sdpa_backend={sdpa_backend}" ) prepared_inputs = self._prepare_for_class(processed_inputs, model_class) with torch.no_grad(): try: with sdpa_kernel(sdpa_backend): outputs_eager = model_eager(**prepared_inputs) outputs_sdpa = model_sdpa(**prepared_inputs) except Exception as e: fail_cases.append(f"{current_case}: {e}") continue for key in logit_keys: eager_logits = outputs_eager[key] sdpa_logits = outputs_sdpa[key] if use_mask: eager_logits = eager_logits[:, 1:] sdpa_logits = sdpa_logits[:, 1:] is_close = torch.allclose(eager_logits, sdpa_logits, atol=atol, rtol=rtol) if not is_close: fail_cases.append(get_mean_reldiff(key, current_case, sdpa_logits, eager_logits, atol, rtol)) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) class SiglipVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope # in ViT, the seq length equals the number of patches num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return SiglipVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values): model = SiglipVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPVisionModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipVisionModelTest(SiglipModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SIGLIP does not use input_ids, 
inputs_embeds, attention_mask and seq_length. """ all_model_classes = (SiglipVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False def setUp(self): self.model_tester = SiglipVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=SiglipVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SIGLIP does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipVisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SiglipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SiglipVisionModel has no base class and is not available in MODEL_MAPPING") def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("pooler_output", "last_hidden_state"), use_attention_mask_options=(False,), ) @require_torch_sdpa def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() class SiglipTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, 
vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return SiglipTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = SiglipTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTester.prepare_config_and_inputs_for_common def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SiglipTextModelTest(SiglipModelTesterMixin, unittest.TestCase): all_model_classes = (SiglipTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.setUp with CLIP->Siglip def setUp(self): self.model_tester = SiglipTextModelTester(self) self.config_tester = ConfigTester(self, config_class=SiglipTextConfig, hidden_size=37) # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_config def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="SiglipTextModel does not support 
standalone training") def test_training(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipTextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip does not use inputs_embeds") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="SiglipTextModel has no base class and is not available in MODEL_MAPPING") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_save_load_fast_init_from_base def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="SiglipTextModel has no base class and is not available in MODEL_MAPPING") # Copied from tests.models.clip.test_modeling_clip.CLIPTextModelTest.test_save_load_fast_init_to_base def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipTextModel.from_pretrained(model_name) self.assertIsNotNone(model) @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("pooler_output", "last_hidden_state"), use_attention_mask_options=(False, True), ) @require_torch_sdpa def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() class SiglipModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = SiglipTextModelTester(parent, **text_kwargs) self.vision_model_tester = SiglipVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training # Copied from tests.models.clip.test_modeling_clip.CLIPModelTester.prepare_config_and_inputs def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values def get_config(self): return SiglipConfig.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = SiglipModel(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, 
pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "return_loss": False, } return config, inputs_dict @require_torch class SiglipModelTest(SiglipModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SiglipModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": SiglipModel} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False _is_composite = True def setUp(self): self.model_tester = SiglipModelTester(self) self.config_tester = ConfigTester(self, config_class=SiglipConfig, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_hidden_states_output def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_inputs_embeds def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_retain_grad_hidden_states_attentions def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="SiglipModel does not have input/output embeddings") # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_model_get_set_embeddings def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest._create_and_check_torchscript with CLIP->Siglip def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # Siglip needs pixel_values traced_model = torch.jit.trace(model, (input_ids, pixel_values)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = 
model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict.keys(): if key not in model_state_dict.keys(): non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Copied from tests.models.clip.test_modeling_clip.CLIPModelTest.test_load_vision_text_config with CLIP->Siglip def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save SiglipConfig and check if we can load SiglipVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = SiglipVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save SiglipConfig and check if we can load SiglipTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = SiglipTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_pixel_values = inputs_dict["pixel_values"].to(torch.bfloat16) dummy_input_ids = inputs_dict["input_ids"] outputs = model(pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True ) self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Image logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Text logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) # Test with attention mask dummy_attention_mask = inputs_dict["attention_mask"] if dummy_attention_mask is not None: 
dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 outputs = model( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) # check with inference + dropout model.train() _ = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, attention_mask=dummy_attention_mask, output_hidden_states=True, ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest("SigLIP does not support right padding") @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("logits_per_image", "logits_per_text", "image_embeds", "text_embeds"), use_attention_mask_options=(False, True), ) @require_torch_sdpa def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() class SiglipForImageClassificationModelTester(SiglipModelTester): def __init__(self, parent): super().__init__(parent) self.batch_size = self.vision_model_tester.batch_size self.num_hidden_layers = self.vision_model_tester.num_hidden_layers self.hidden_size = self.vision_model_tester.hidden_size self.seq_length = self.vision_model_tester.seq_length def prepare_config_and_inputs(self): _, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SiglipForImageClassificationModelTest(SiglipModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (SiglipForImageClassification,) if is_torch_available() else () pipeline_model_mapping = {"image-classification": SiglipForImageClassification} if is_torch_available() else {} fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["SiglipMultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False _is_composite = True def setUp(self): self.model_tester = SiglipForImageClassificationModelTester(self) @unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="SiglipForImageClassification does not support inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="SiglipForImageClassification does not support 
gradient checkpointing yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SiglipForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa @slow @is_flaky() def test_eager_matches_sdpa_inference(self, torch_dtype: str): super().test_eager_matches_sdpa_inference( torch_dtype=torch_dtype, logit_keys=("logits",), use_attention_mask_options=(False,) ) @require_torch_sdpa def test_sdpa_can_dispatch_composite_models(self): super().test_sdpa_can_dispatch_composite_models() # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) return image @require_vision @require_torch class SiglipModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name).to(torch_device) processor = SiglipProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of 2 cats", "a photo of 2 dogs"], images=image, padding="max_length", return_tensors="pt" ).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits_per_image = outputs.logits_per_image logits_per_text = outputs.logits_per_text # verify the logits self.assertEqual( logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[-0.7567, -10.3354]], device=torch_device) torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3) # verify the probs probs = torch.sigmoid(logits_per_image) # these are the probabilities expected_probs = torch.tensor([[3.1937e-01, 3.2463e-05]], device=torch_device) torch.testing.assert_close(probs, expected_probs, rtol=1e-3, atol=1e-3) @slow def test_inference_interpolate_pos_encoding(self): model_name = "google/siglip-base-patch16-224" model = SiglipModel.from_pretrained(model_name).to(torch_device) # 640 x 480 image image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") processor = SiglipProcessor.from_pretrained(model_name, do_resize=False, size={"height": 480, "width": 640}) inputs = processor(text="what's in the image", images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, interpolate_pos_encoding=True) # verify the shape # patch size = 16 # batch size 1, (640/16) * (480/16) = 1200 patches, 768 hidden size expected_shape = torch.Size((1, 1200, 768)) self.assertEqual(outputs.vision_model_output.last_hidden_state.shape, expected_shape)
transformers/tests/models/siglip/test_modeling_siglip.py/0
{ "file_path": "transformers/tests/models/siglip/test_modeling_siglip.py", "repo_id": "transformers", "token_count": 19205 }
207
# coding=utf-8 # Copyright 2018 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import T5Config, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ByT5Tokenizer, T5Tokenizer, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model class TFT5ModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_mask = True self.use_labels = True self.vocab_size = 99 self.n_positions = 14 self.hidden_size = 32 self.num_hidden_layers = 2 self.num_attention_heads = 4 self.d_ff = 37 self.relative_attention_num_buckets = 8 self.dropout_rate = 0.1 self.initializer_factor = 0.002 self.eos_token_id = 1 self.pad_token_id = 0 self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_labels = None if self.use_labels: token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = T5Config( vocab_size=self.vocab_size, n_positions=self.n_positions, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, ) return (config, input_ids, input_mask, token_labels) def create_and_check_t5_model(self, config, input_ids, input_mask, token_labels): model = TFT5Model(config=config) inputs = { "input_ids": input_ids, "decoder_input_ids": input_ids, "decoder_attention_mask": input_mask, } result = model(inputs) result = model(input_ids, decoder_attention_mask=input_mask, decoder_input_ids=input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertListEqual(list(encoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size]) self.parent.assertListEqual(list(decoder_output.shape), [self.batch_size, self.seq_length, self.hidden_size]) # There should be `num_layers` key value embeddings stored in decoder_past[1] self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value 
stored in each decoder_past[1] tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_t5_with_lm_head(self, config, input_ids, input_mask, token_labels):
        model = TFT5ForConditionalGeneration(config=config)
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }

        result = model(inputs_dict)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_t5_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask):
        model = TFT5Model(config=config).get_decoder()

        input_ids = input_ids[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, use_cache=True)

        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append next_tokens to input_ids
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)

        output_from_no_past = model(next_input_ids)[0]
        output_from_past = model(next_tokens, past_key_values=outputs.past_key_values)[0]

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_decoder_model_attention_mask_past(
        self, config, input_ids, decoder_input_ids, attention_mask
    ):
        model = TFT5Model(config=config).get_decoder()

        # create attention mask
        half_seq_length = self.seq_length // 2
        attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
        attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
        attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)

        # first forward pass
        outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
        vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
        condition = tf.transpose(
            tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
        )
        input_ids = tf.where(condition, random_other_next_tokens, input_ids)

        # append to next input_ids and attn_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        attn_mask = tf.concat(
            [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],
            axis=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)[0]
        output_from_past = model(next_tokens, past_key_values=outputs.past_key_values, attention_mask=attn_mask)[0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).numpy().item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
        output_from_past_slice = output_from_past[:, 0, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def create_and_check_t5_decoder_model_past_large_inputs(
        self, config, input_ids, decoder_input_ids, attention_mask
    ):
        model = TFT5Model(config=config).get_decoder()

        input_ids = input_ids[:1, :]
        attention_mask = attention_mask[:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and next attention mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=outputs.past_key_values
        )[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": input_ids,
            "decoder_attention_mask": input_mask,
        }
        return config, inputs_dict


@require_tf
class TFT5ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    is_encoder_decoder = True
    all_model_classes = (TFT5Model, TFT5ForConditionalGeneration) if is_tf_available() else ()
    all_generative_model_classes = (TFT5ForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFT5Model,
            "summarization": TFT5ForConditionalGeneration,
            "text2text-generation": TFT5ForConditionalGeneration,
            "translation": TFT5ForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    test_onnx = False

    def setUp(self):
        self.model_tester = TFT5ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_t5_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_model(*config_and_inputs)

    def test_t5_model_v1_1(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        config.tie_word_embeddings = False
        config.feed_forward_proj = "gated-gelu"
        self.model_tester.create_and_check_t5_model(config, *config_and_inputs[1:])

    def test_with_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_with_lm_head(*config_and_inputs)

    def test_t5_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_past(*config_and_inputs)

    def test_t5_decoder_model_past_with_attn_mask(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_t5_decoder_model_attention_mask_past(*config_and_inputs)

    def test_t5_decoder_model_past_large_inputs(self):
        config_and_inputs = 
self.model_tester.prepare_config_and_inputs() # `create_and_check_t5_decoder_model_past_large_inputs` has special inputs: # (config, input_ids, decoder_input_ids, attention_mask) # and we have to prepare it correctly here. config, input_ids, input_mask, token_labels = config_and_inputs config_and_inputs = (config, input_ids, None, input_mask) self.model_tester.create_and_check_t5_decoder_model_past_large_inputs(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TFT5Model.from_pretrained("google-t5/t5-small") self.assertIsNotNone(model) def test_generate_with_headmasking(self): # TODO: Fix head-masking according to PyTorch T5 model pass # This test is run in `TFT5EncoderOnlyModelTest`, where the main layer has the same inputs as the model @unittest.skip(reason="The inputs of the Main Layer are different.") def test_keras_save_load(self): pass class TFT5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = TFT5EncoderModel(config=config) result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.shape, (self.batch_size, self.encoder_seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class TFT5EncoderOnlyModelTest(TFModelTesterMixin, unittest.TestCase): is_encoder_decoder = False all_model_classes = (TFT5EncoderModel,) if is_tf_available() else () test_onnx 
= False def setUp(self): self.model_tester = TFT5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) # This model is not able to be part of a pipeline def test_train_pipeline_custom_model(self): pass @require_tf @require_sentencepiece @require_tokenizers class TFT5GenerationIntegrationTests(unittest.TestCase): @slow def test_greedy_xla_generate_simple(self): model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") # two examples with different lengths to confirm that attention masks are operational in XLA sentences = [ "Translate English to German: Today is a beautiful day.", "Translate English to German: I have four cats, three dogs, two birds, and a horse.", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids xla_generate = tf.function(model.generate, jit_compile=True) output_ids = model.generate(input_ids) output_ids_xla = xla_generate(input_ids) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True) expected_output_string = [ "Heute ist ein schöner Tag.", "Ich habe vier Katzen, drei Hunde, zwei Vögel und ein Pferd.", ] self.assertListEqual(expected_output_string, output_strings) self.assertListEqual(expected_output_string, output_strings_xla) @slow def test_t5_greedy_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") sentences = ["Yesterday, my name was", "Today is a beautiful day and"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "do_sample": False, "repetition_penalty": 2.2, } output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["Yesterday, my name was", "Heute ist ein schöne Tag und"] self.assertListEqual(expected_output_string, output_strings) @slow def test_sample_xla_generate_simple(self): # NOTE: due to the small numerical differences that are natural when we compile to XLA, sampling the same # output out of the same seed is far from guaranteed. We can, however, confirm that the results are sensible # and that we can seed both versions.
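# (Clarifying note, based on the documented TF `generate` API: `seed` is a pair of integers fed to TF's # stateless random ops, so re-running the same variant -- eager or XLA-compiled -- with the same pair # reproduces that variant's output, even though the eager and XLA outputs may legitimately differ from # each other, as the distinct expected strings below show.)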
# forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") sentence = "Translate English to German: I have two bananas" input_ids = tokenizer(sentence, return_tensors="tf", padding=True).input_ids expected_output_string = ["Ich habe zwei Bananen"] expected_output_string_xla = ["Ich habe 2 Bananen"] # seed set -> deterministic sampling sequence -> deterministic generation output_ids = model.generate(input_ids, do_sample=True, seed=[42, 0]) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) self.assertListEqual(expected_output_string, output_strings) xla_generate = tf.function(model.generate, jit_compile=True) # seed set -> deterministic sampling sequence -> deterministic generation output_ids_xla = xla_generate(input_ids, do_sample=True, seed=[42, 0]) output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True) self.assertListEqual(expected_output_string_xla, output_strings_xla) @slow def test_t5_sample_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "do_sample": True, "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "repetition_penalty": 2.2, "temperature": 0.8, "top_k": 500, "top_p": 0.9, "seed": [20, 0], # seed set -> deterministic sampling sequence -> deterministic generation } # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(":/CPU:0"): output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["- I really love my way of this.", "die Transformatoren sind wirklich erstaunlich"] self.assertListEqual(expected_output_string, output_strings) # TODO (ydshieh): undo skip once a fix is done on TF side. 
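# (Note on the pattern used in the tests below: the original T5 checkpoints ship generation defaults in # `config.task_specific_params`, e.g. a "translation_en_to_fr" entry whose "prefix" is # "translate English to French: "; `model.config.update(translation_config)` merges those defaults into the # config before `generate` is called.)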
@unittest.skip("Skip for now as TF 2.13 breaks it on GPU") @slow def test_beam_search_xla_generate_simple(self): model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") # tests XLA with task specific arguments task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_fr", {}) model.config.update(translation_config) # two examples with different lengths to confirm that attention masks are operational in XLA sentences = [ model.config.prefix + "Today is a beautiful day.", model.config.prefix + "I have four cats, three dogs, two birds, and a horse.", ] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids xla_generate = tf.function(model.generate, jit_compile=True) output_ids = model.generate(input_ids, num_beams=2) output_ids_xla = xla_generate(input_ids, num_beams=2) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) output_strings_xla = tokenizer.batch_decode(output_ids_xla, skip_special_tokens=True) expected_output_string = [ "Aujourd'hui est une belle journée.", "J'ai quatre chats, trois chiens, deux oiseaux et un cheval.", ] self.assertListEqual(expected_output_string, output_strings) self.assertListEqual(expected_output_string, output_strings_xla) @slow def test_beam_search_generate(self): model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") sentences = ["I really love my", "Translate English to German: the transformers are truly amazing"] input_ids = tokenizer(sentences, return_tensors="tf", padding=True).input_ids generation_kwargs = { "bad_words_ids": [tokenizer("my").input_ids, tokenizer("ein schöner").input_ids], "no_repeat_ngram_size": 3, "do_sample": False, "repetition_penalty": 2.2, "num_beams": 4, } output_ids = model.generate(input_ids, **generation_kwargs) output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) expected_output_string = ["Ich liebe es so sehr!", "die Transformatoren sind wirklich erstaunlich"] self.assertListEqual(expected_output_string, output_strings) @require_tf @require_sentencepiece @require_tokenizers class TFT5ModelIntegrationTests(unittest.TestCase): @cached_property def model(self): return TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-base") @slow def test_small_integration_test(self): """ For comparision run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = TFT5ForConditionalGeneration.from_pretrained("google-t5/t5-small") tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_mean(loss).numpy() EXPECTED_SCORE = -4.771147 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): """ For comparision run: >>> import t5 # pip install 
t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = TFT5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small") tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_mean(loss).numpy() EXPECTED_SCORE = -14.757326 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.9.1 >>> path_to_byt5_small_checkpoint = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_byt5_small_checkpoint, batch_size=1, tpu=None) >>> vocab = t5.data.ByteVocabulary() >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = TFT5ForConditionalGeneration.from_pretrained("google/byt5-small") tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="tf").input_ids labels = tokenizer("Hi I am", return_tensors="tf").input_ids loss = model(input_ids, labels=labels).loss mtf_score = -tf.math.reduce_mean(loss).numpy() EXPECTED_SCORE = -7.592465 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_summarization(self): model = self.model tok = T5Tokenizer.from_pretrained("google-t5/t5-base") FRANCE_ARTICLE = ( # @noqa "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col.
Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . 
The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. 
But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. 
It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. 
The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . 
\"one can hear cries of 'My God' in several languages,\" one" " magazine says .", "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well .", "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and" " implement a rigorous inspection regime .", "prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 .", ] task_specific_config = getattr(model.config, "task_specific_params", {}) summarization_config = task_specific_config.get("summarization", {}) model.config.update(summarization_config) dct = tok( [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], max_length=512, padding="max_length", truncation=True, return_tensors="tf", ) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = [ tok.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in hypotheses_batch ] self.assertListEqual( expected_summaries, decoded, ) @slow def test_translation_en_to_de(self): tok = T5Tokenizer.from_pretrained("google-t5/t5-base") model = self.model task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_de", {}) self.model.config.update(translation_config) original_input = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.' expected_translation = ( '"Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.' ) input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf") output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=50, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation) @slow def test_translation_en_to_fr(self): model = self.model tok = T5Tokenizer.from_pretrained("google-t5/t5-base") task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_fr", {}) model.config.update(translation_config) en_text = ( ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of' " countless generations of stars: the oldest stars are seen as blue dots. " ) new_truncated_translation = ( "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre " "un " "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées " "sous forme " "de points bleus." 
) input_ids = tok(model.config.prefix + en_text, return_tensors="tf").input_ids output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=100, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, new_truncated_translation) @slow def test_translation_en_to_ro(self): model = self.model tok = T5Tokenizer.from_pretrained("google-t5/t5-base") task_specific_config = getattr(model.config, "task_specific_params", {}) translation_config = task_specific_config.get("translation_en_to_ro", {}) model.config.update(translation_config) original_input = "Taco Bell said it plans to add 2,000 locations in the US by 2022." expected_translation = "Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022." input_ids = tok.encode(model.config.prefix + original_input, return_tensors="tf") output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=50, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False) self.assertEqual(translation, expected_translation)
transformers/tests/models/t5/test_modeling_tf_t5.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import unittest from transformers import AutoBackbone from transformers.testing_utils import is_flaky, require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class TimmBackboneModelTester: def __init__( self, parent, out_indices=None, out_features=None, stage_names=None, backbone="resnet18", batch_size=3, image_size=32, num_channels=3, is_training=True, use_pretrained_backbone=True, ): self.parent = parent self.out_indices = out_indices if out_indices is not None else [4] self.stage_names = stage_names self.out_features = out_features self.backbone = backbone self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.use_pretrained_backbone = use_pretrained_backbone self.is_training = is_training def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return TimmBackboneConfig( image_size=self.image_size, num_channels=self.num_channels, out_features=self.out_features, out_indices=self.out_indices, stage_names=self.stage_names, use_pretrained_backbone=self.use_pretrained_backbone, backbone=self.backbone, ) def create_and_check_model(self, config, pixel_values): model = TimmBackbone(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) # `BackboneOutput` exposes the stage outputs under `feature_maps` self.parent.assertEqual( result.feature_maps[-1].shape, (self.batch_size, model.channels[-1], 14, 14), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch @require_timm class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TimmBackbone,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {} test_resize_embeddings = False test_head_masking = False test_pruning = False has_attentions = False def setUp(self): # self.config_class = PretrainedConfig self.config_class = TimmBackboneConfig self.model_tester = TimmBackboneModelTester(self) self.config_tester = ConfigTester( self, config_class=self.config_class, has_text_modality=False, common_properties=["num_channels"] ) def test_config(self): self.config_tester.run_common_tests() @is_flaky( description="`TimmBackbone` has no `_init_weights`.
Timm's way of weight init. seems to give larger magnitude in the intermediate values during `forward`." ) def test_batching_equivalence(self): super().test_batching_equivalence() def test_timm_transformer_backbone_equivalence(self): timm_checkpoint = "resnet18" transformers_checkpoint = "microsoft/resnet-18" timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names)) self.assertEqual(timm_model.channels, transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices, [-1]) self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1]) timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3]) transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices, transformers_model.out_indices) self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features)) self.assertEqual(timm_model.channels, transformers_model.channels) @unittest.skip(reason="TimmBackbone doesn't support feed forward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="TimmBackbone doesn't have num_hidden_layers attribute") def test_hidden_states_output(self): pass @unittest.skip(reason="TimmBackbone initialization is managed on the timm side") def test_initialization(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="TimmBackbone models doesn't have inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="TimmBackbone model cannot be created without specifying a backbone checkpoint") def test_from_pretrained_no_checkpoint(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_save_load(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_checkpoints(self): pass @unittest.skip(reason="No support for low_cpu_mem_usage=True.") def test_save_load_low_cpu_mem_usage_no_safetensors(self): pass @unittest.skip(reason="model weights aren't tied in TimmBackbone.") def test_tie_model_weights(self): pass @unittest.skip(reason="model weights aren't tied in TimmBackbone.") def test_tied_model_weights_key_ignore(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_load_save_without_tied_weights(self): pass @unittest.skip(reason="Only checkpoints on timm can be loaded into TimmBackbone") def test_model_weights_reload_no_missing_tied_weights(self): pass @unittest.skip(reason="TimmBackbone doesn't have hidden size info in its configuration.") def test_channels(self): pass @unittest.skip(reason="TimmBackbone doesn't support output_attentions.") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="Safetensors is not supported by timm.") def 
test_can_use_safetensors(self): pass @unittest.skip(reason="Need to use a timm backbone and there is no tiny model available.") def test_model_is_small(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = self.has_attentions # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0][-1] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: attentions = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) # TimmBackbone config doesn't have out_features attribute def test_create_from_modified_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), len(config.out_indices)) self.assertEqual(len(model.channels), len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None modified_config = copy.deepcopy(config) modified_config.out_indices = None model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict) self.assertEqual(len(result.feature_maps), 1) self.assertEqual(len(model.channels), 1) # Check backbone can be initialized with fresh weights modified_config = copy.deepcopy(config) modified_config.use_pretrained_backbone = False model = model_class(modified_config) model.to(torch_device) model.eval() result = model(**inputs_dict)
transformers/tests/models/timm_backbone/test_modeling_timm_backbone.py
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs if is_vision_available(): from PIL import Image from transformers import ViltImageProcessor class ViltImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, size_divisor=2, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): super().__init__() size = size if size is not None else {"shortest_edge": 30} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.size_divisor = size_divisor self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to ViltImageProcessor, assuming do_resize is set to True with a scalar size and size_divisor. 
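As a worked example with the defaults above (shortest_edge=30, size_divisor=2): an 18x36 image is first scaled by 30/18 to 30x60, then capped so its longer side does not exceed int(1333 / 800 * 30) = 49 (giving 25x49 after rounding), and finally floored to multiples of size_divisor, for an expected height/width of 24x48.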
""" if not batched: size = self.size["shortest_edge"] image = image_inputs[0] if isinstance(image, Image.Image): w, h = image.size elif isinstance(image, np.ndarray): h, w = image.shape[0], image.shape[1] else: h, w = image.shape[1], image.shape[2] scale = size / min(w, h) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size max_size = int((1333 / 800) * size) if max(newh, neww) > max_size: scale = max_size / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) expected_height, expected_width = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return (self.num_channels, height, width) def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class ViltImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = ViltImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = ViltImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "size_divisor")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 30}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"shortest_edge": 42})
transformers/tests/models/vilt/test_image_processing_vilt.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class TFXLMRobertaModelIntegrationTest(unittest.TestCase): @slow def test_output_embeds_base_model(self): model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base") features = { "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32), # "My dog is cute" "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32), } output = model(features)["last_hidden_state"] expected_shape = tf.TensorShape((1, 6, 768)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. expected_slice = tf.convert_to_tensor( [ [ [0.0681762, 0.10894451, 0.06772504], [-0.06423668, 0.02366615, 0.04329344], [-0.06057295, 0.09974135, -0.00070584], ] ], dtype=tf.float32, ) self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
transformers/tests/models/xlm_roberta/test_modeling_tf_xlm_roberta.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_torch_available, is_vision_available, ) from transformers.pipelines import DocumentQuestionAnsweringPipeline, pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectron2, require_pytesseract, require_tf, require_torch, require_torch_bf16, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class Image: @staticmethod def open(*args, **kwargs): pass def load_image(_): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. INVOICE_URL = ( "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png" ) @is_pipeline_test @require_torch @require_vision class DocumentQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, torch_dtype="float32", ): dqa_pipeline = DocumentQuestionAnsweringPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, torch_dtype=torch_dtype, ) image = INVOICE_URL word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) question = "What is the placebo?" examples = [ { "image": load_image(image), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def run_pipeline_test(self, dqa_pipeline, examples): outputs = dqa_pipeline(examples, top_k=2) self.assertEqual( outputs, [ [ {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, ] ] * 3, ) @require_torch @require_detectron2 @require_pytesseract def test_small_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2-for-dqa-test" ) image = INVOICE_URL question = "How many cats are there?" 
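# The checkpoint used above is a tiny, randomly initialized model, so the near-zero scores and nonsensical # answers below only pin down the pipeline's plumbing (preprocessing, OCR handling and output format), # not answer quality.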
expected_output = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) # No text is detected in this image, so layoutlmv2 should fail and most likely return an empty answer image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(outputs, []) # We can optionally pass the words and bounding boxes directly image = "./tests/fixtures/tests_samples/COCO/000000039769.png" words = [] boxes = [] outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) self.assertEqual(outputs, []) @require_torch @require_torch_bf16 @require_detectron2 @require_pytesseract def test_small_model_pt_bf16(self): dqa_pipeline = pipeline( "document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2-for-dqa-test", torch_dtype=torch.bfloat16, ) image = INVOICE_URL question = "How many cats are there?" expected_output = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) # No text is detected in this image, so layoutlmv2 should fail and most likely return an empty answer image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(outputs, []) # We can optionally pass the words and bounding boxes directly image = "./tests/fixtures/tests_samples/COCO/000000039769.png" words = [] boxes = [] outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) self.assertEqual(outputs, []) # TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented # @require_torch # def test_small_model_pt_donut(self): # dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") # # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") # image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" # question = "How many cats are there?" # # outputs = dqa_pipeline(image=image, question=question, top_k=2) # self.assertEqual( # nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] # ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) image = INVOICE_URL question = "What is the invoice number?"
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2, ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt_chunk(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=3), [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=3), [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=3), [ [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=3), [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ], ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm_chunk(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) @slow @require_torch def test_large_model_pt_donut(self): dqa_pipeline = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), image_processor="naver-clova-ix/donut-base-finetuned-docvqa", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}]) @require_tf @unittest.skip(reason="Document question answering not implemented in TF") def test_small_model_tf(self): pass
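
# Not part of the test suite: a minimal usage sketch of the pipeline call the
# slow tests above exercise. It assumes `pytesseract` is installed and reuses
# the checkpoint revision and pinned invoice image from the tests; exact scores
# may differ when run standalone.
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa", revision="3dc6de3")
    # Each prediction carries a score, the answer text, and start/end word indices.
    print(dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2))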
transformers/tests/pipelines/test_pipelines_document_question_answering.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_document_question_answering.py", "repo_id": "transformers", "token_count": 7509 }
212
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_accelerator, require_torch_or_tf, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def test_small_model_pt(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], ) outputs = text_generator(["This is a test", "This is a second test"]) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. FiliFili@@" ) } ], ], ) outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ) ## -- test tokenizer_kwargs test_str = "testing tokenizer kwargs. using truncation must result in a different generation." input_len = len(text_generator.tokenizer(test_str)["input_ids"]) output_str, output_str_with_truncation = ( text_generator(test_str, do_sample=False, return_full_text=False, min_new_tokens=1)[0]["generated_text"], text_generator( test_str, do_sample=False, return_full_text=False, min_new_tokens=1, truncation=True, max_length=input_len + 1, )[0]["generated_text"], ) assert output_str != output_str_with_truncation # results must be different because one had truncation ## -- test kwargs for preprocess_params outputs = text_generator("This is a test", do_sample=False, add_special_tokens=False, padding=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], ) # -- what is the point of this test? 
padding is hardcoded False in the pipeline anyway text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id text_generator.tokenizer.pad_token = "<pad>" outputs = text_generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ], ) @require_torch def test_small_chat_model_pt(self): text_generator = pipeline( task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt" ) # Using `do_sample=False` to force deterministic output chat1 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, ] chat2 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a second test"}, ] outputs = text_generator(chat1, do_sample=False, max_new_tokens=10) expected_chat1 = chat1 + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] self.assertEqual( outputs, [ {"generated_text": expected_chat1}, ], ) outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10) expected_chat2 = chat2 + [ { "role": "assistant", "content": " stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs", } ] self.assertEqual( outputs, [ [{"generated_text": expected_chat1}], [{"generated_text": expected_chat2}], ], ) @require_torch def test_small_chat_model_continue_final_message(self): # Here we check that passing a chat that ends in an assistant message is handled correctly # by continuing the final message rather than starting a new one text_generator = pipeline( task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt" ) # Using `do_sample=False` to force deterministic output chat1 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, {"role": "assistant", "content": "This is"}, ] outputs = text_generator(chat1, do_sample=False, max_new_tokens=10) # Assert that we continued the last message and there isn't a sneaky <|im_end|> self.assertEqual( outputs, [ { "generated_text": [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, { "role": "assistant", "content": "This is stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs", }, ] } ], ) @require_torch def test_small_chat_model_continue_final_message_override(self): # Here we check that passing a chat that ends in an assistant message is handled correctly # by continuing the final message rather than starting a new one text_generator = pipeline( task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt" ) # Using `do_sample=False` to force deterministic output chat1 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, ] outputs = text_generator(chat1, do_sample=False, max_new_tokens=10, continue_final_message=True) # Assert that we continued the last message and there isn't a sneaky <|im_end|> self.assertEqual( outputs, [ { "generated_text": [ {"role": "system", "content": "This is a system message."}, { "role": "user", "content": "This is a test stairs stairs stairs stairs stairs stairs stairs stairs stairs 
stairs", }, ] } ], ) @require_torch def test_small_chat_model_with_dataset_pt(self): from torch.utils.data import Dataset from transformers.pipelines.pt_utils import KeyDataset class MyDataset(Dataset): data = [ [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, ], ] def __len__(self): return 1 def __getitem__(self, i): return {"text": self.data[i]} text_generator = pipeline( task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt" ) dataset = MyDataset() key_dataset = KeyDataset(dataset, "text") for outputs in text_generator(key_dataset, do_sample=False, max_new_tokens=10): expected_chat = dataset.data[0] + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] self.assertEqual( outputs, [ {"generated_text": expected_chat}, ], ) @require_torch def test_small_chat_model_with_iterator_pt(self): from transformers.pipelines.pt_utils import PipelineIterator text_generator = pipeline( task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="pt" ) # Using `do_sample=False` to force deterministic output chat1 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, ] chat2 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a second test"}, ] expected_chat1 = chat1 + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] expected_chat2 = chat2 + [ { "role": "assistant", "content": " stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs", } ] def data(): yield from [chat1, chat2] outputs = text_generator(data(), do_sample=False, max_new_tokens=10) assert isinstance(outputs, PipelineIterator) outputs = list(outputs) self.assertEqual( outputs, [ [{"generated_text": expected_chat1}], [{"generated_text": expected_chat2}], ], ) @require_tf def test_small_model_tf(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], ) outputs = text_generator(["This is a test", "This is a second test"], do_sample=False) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ], ) @require_tf def test_small_chat_model_tf(self): text_generator = pipeline( task="text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template", framework="tf" ) # Using `do_sample=False` to force deterministic output chat1 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, ] chat2 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a second test"}, ] outputs = text_generator(chat1, do_sample=False, max_new_tokens=10) expected_chat1 = chat1 + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors 
factors",
            }
        ]
        self.assertEqual(
            outputs,
            [
                {"generated_text": expected_chat1},
            ],
        )

        outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10)
        expected_chat2 = chat2 + [
            {
                "role": "assistant",
                "content": " stairs stairs stairs stairs stairs stairs stairs stairs stairs stairs",
            }
        ]
        self.assertEqual(
            outputs,
            [
                [{"generated_text": expected_chat1}],
                [{"generated_text": expected_chat2}],
            ],
        )

    def get_test_pipeline(
        self,
        model,
        tokenizer=None,
        image_processor=None,
        feature_extractor=None,
        processor=None,
        torch_dtype="float32",
    ):
        text_generator = TextGenerationPipeline(
            model=model,
            tokenizer=tokenizer,
            feature_extractor=feature_extractor,
            image_processor=image_processor,
            processor=processor,
            torch_dtype=torch_dtype,
        )
        return text_generator, ["This is a test", "Another test"]

    def test_stop_sequence_stopping_criteria(self):
        prompt = """Hello I believe in"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output,
            [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}],
        )

        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])

    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer

        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])

        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))

        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
            )
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )

        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)

        # Empty prompt is slightly special: it requires the BOS token to exist.
        # Special case for Pegasus, which always appends EOS, so it works even
        # without BOS.
if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("", add_special_tokens=False)

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            self.skipTest(reason="TF generation does not support max_new_tokens")

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [
            "RwkvForCausalLM",
            "XGLMForCausalLM",
            "GPTNeoXForCausalLM",
            "GPTNeoXJapaneseForCausalLM",
            "FuyuForCausalLM",
            "LlamaForCausalLM",
        ]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            if str(text_generator.device) == "cpu":
                with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                    text_generator("This is a test" * 500, max_new_tokens=20)

            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            if str(text_generator.device) == "cpu":
                with self.assertRaises(ValueError):
                    text_generator(
                        "This is a test" * 500,
                        handle_long_generation="hole",
                        max_new_tokens=tokenizer.model_max_length + 10,
                    )

    @require_torch
    @require_accelerate
    @require_torch_accelerator
    def test_small_model_pt_bloom_accelerate(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16},
        )
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to
        # mean anything else)
pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )

    @require_torch
    @require_torch_accelerator
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom",
            device=torch_device,
            torch_dtype=torch.float16,
        )
        pipe("This is a test")

    @require_torch
    @require_accelerate
    @require_torch_accelerator
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom", device_map=torch_device, torch_dtype=torch.float16
        )
        pipe("This is a test", do_sample=True, top_p=0.5)

    def test_pipeline_length_setting_warning(self):
        prompt = """Hello world"""
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)

        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)

        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)

    @require_torch
    def test_pipeline_assisted_generation(self):
        """Tests that we can run assisted generation in the pipeline"""
        model = "hf-internal-testing/tiny-random-MistralForCausalLM"
        pipe = pipeline("text-generation", model=model, assistant_model=model)

        # We can run the pipeline
        prompt = "Hello world"
        _ = pipe(prompt)

        # It is running assisted generation under the hood (e.g. flags incompatible with assisted gen will crash)
        with self.assertRaises(ValueError):
            _ = pipe(prompt, generate_kwargs={"num_beams": 2})
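
# Not part of the test suite: a minimal sketch of the chat-format usage covered
# above. It reuses the tiny chat-template checkpoint from the tests; greedy
# decoding (`do_sample=False`) keeps the output deterministic.
if __name__ == "__main__":
    generator = pipeline("text-generation", model="hf-internal-testing/tiny-gpt2-with-chatml-template")
    chat = [
        {"role": "system", "content": "This is a system message."},
        {"role": "user", "content": "This is a test"},
    ]
    # "generated_text" is the input chat with the new assistant turn appended.
    print(generator(chat, do_sample=False, max_new_tokens=10)[0]["generated_text"])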
transformers/tests/pipelines/test_pipelines_text_generation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_text_generation.py", "repo_id": "transformers", "token_count": 12556 }
213
# Testing mixed int8 quantization

![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1660567705337-62441d1d9fdefb55a0b7d12c.png)

The following is a recipe for effectively debugging the `bitsandbytes` integration in Hugging Face `transformers`.

## Library requirements

+ `transformers>=4.22.0`
+ `accelerate>=0.12.0`
+ `bitsandbytes>=0.31.5`

## Hardware requirements

The following instructions are tested with two NVIDIA Tesla T4 GPUs. To run `bitsandbytes` successfully, you need a GPU that supports 8-bit tensor cores. Turing, Ampere, or newer architectures (e.g. T4, RTX 20s/30s, A40-A100, A6000) should be supported.

## Virtual envs

```bash
conda create --name int8-testing python==3.8
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```

If `transformers>=4.23.0` has not been released yet, use:

```bash
pip install git+https://github.com/huggingface/transformers.git
```

## Troubleshooting

A list of common errors:

### Torch does not correctly do the operations on GPU

First check that the following runs without any error:

```py
import torch

vec = torch.randn(1, 2, 3).to(0)
```

If it does not, install torch using `conda`, like:

```bash
conda create --name int8-testing python==3.8
conda install pytorch torchvision torchaudio cudatoolkit=11.6 -c pytorch -c conda-forge
pip install bitsandbytes>=0.31.5
pip install accelerate>=0.12.0
pip install transformers>=4.23.0
```

For the latest PyTorch installation instructions, see [this page](https://pytorch.org/get-started/locally/); the snippet above should then work.

### ` bitsandbytes operations are not supported under CPU!`

This happens when some `Linear` weights are placed on the CPU when using `accelerate`. Check `model.hf_device_map` carefully and make sure that no `Linear` module is assigned to CPU. It is fine to have the last module (usually the `lm_head`) on CPU.

### `To use the type as a Parameter, please correct the detach() semantics defined by __torch_dispatch__() implementation.`

Use the latest version of `accelerate`, e.g. `pip install -U accelerate`, and the problem should be solved.

### `Parameter has no attribute .CB`

Same solution as above.

### `RuntimeError: CUDA error: an illegal memory access was encountered ... consider passing CUDA_LAUNCH_BLOCKING=1`

Run your script with `CUDA_LAUNCH_BLOCKING=1` prepended and you should observe an error as described in the next section.

### `CUDA illegal memory error: an illegal memory access at line...`

Check the CUDA versions with:

```bash
nvcc --version
```

and confirm it is the same version as the one detected by `bitsandbytes`. If not, run:

```bash
ls -l $CONDA_PREFIX/lib/libcudart.so
```

or

```bash
ls -l $LD_LIBRARY_PATH
```

Check that the `libcudart.so` symlink points to the correct file. Sometimes `nvcc` detects the correct CUDA version but `bitsandbytes` doesn't; you have to make sure that the symlink for `libcudart.so` resolves to the correct CUDA library.

Here is an example of a badly configured CUDA installation:

`nvcc --version` gives:

![Screenshot 2022-08-15 at 15.12.23.png](https://cdn-uploads.huggingface.co/production/uploads/1660569220888-62441d1d9fdefb55a0b7d12c.png)

which means that the detected CUDA version is 11.3, but `bitsandbytes` outputs:

![image.png](https://cdn-uploads.huggingface.co/production/uploads/1660569284243-62441d1d9fdefb55a0b7d12c.png)

First check:

```bash
echo $LD_LIBRARY_PATH
```

If this contains multiple paths separated by `:`,
then make sure the correct CUDA version is picked up by checking each path (`$path`) in the list:

```bash
ls -l $path/libcudart.so
```

If there is only a single path, simply run:

```bash
ls -l $LD_LIBRARY_PATH/libcudart.so
```

and you will see something like:

![Screenshot 2022-08-15 at 15.12.33.png](https://cdn-uploads.huggingface.co/production/uploads/1660569176504-62441d1d9fdefb55a0b7d12c.png)

If you see that the file is linked to the wrong CUDA version (here 10.2), find the correct location for `libcudart.so` (e.g. with `find / -name libcudart.so`) and point the `LD_LIBRARY_PATH` environment variable at the directory containing the correct `libcudart.so` file.
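
As a quick sanity check before digging into symlinks (a minimal sketch; it only assumes `nvcc` is on your `PATH`), you can compare the CUDA version PyTorch was built with against what `nvcc` reports:

```py
import subprocess

import torch

# CUDA version this PyTorch build was compiled against (None on CPU-only builds)
print("torch built with CUDA:", torch.version.cuda)
# CUDA version reported by the nvcc on PATH
print(subprocess.run(["nvcc", "--version"], capture_output=True, text=True).stdout)
```

If the two versions disagree, the `LD_LIBRARY_PATH` fix above is the place to start.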
transformers/tests/quantization/bnb/README.md/0
{ "file_path": "transformers/tests/quantization/bnb/README.md", "repo_id": "transformers", "token_count": 1405 }
214
import importlib def is_sagemaker_available(): return importlib.util.find_spec("sagemaker") is not None
transformers/tests/sagemaker/__init__.py/0
{ "file_path": "transformers/tests/sagemaker/__init__.py", "repo_id": "transformers", "token_count": 36 }
215
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import copy import gc import inspect import math import os import os.path import random import re import tempfile import warnings from collections import defaultdict from contextlib import contextmanager from typing import Dict, List, Tuple import numpy as np from packaging import version from parameterized import parameterized from pytest import mark import transformers from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification, PretrainedConfig, PreTrainedModel, is_torch_available, logging, set_seed, ) from transformers.integrations import HfDeepSpeedConfig from transformers.integrations.deepspeed import ( is_deepspeed_available, is_deepspeed_zero3_enabled, unset_hf_deepspeed_config, ) from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) from transformers.testing_utils import ( CaptureLogger, is_flaky, is_pt_flax_cross_test, is_pt_tf_cross_test, require_accelerate, require_bitsandbytes, require_deepspeed, require_flash_attn, require_non_xpu, require_safetensors, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_multi_accelerator, require_torch_multi_gpu, require_torch_sdpa, set_config_for_less_flaky_test, set_model_for_less_flaky_test, set_model_tester_for_less_flaky_test, slow, torch_device, ) from transformers.utils import ( CONFIG_NAME, GENERATION_CONFIG_NAME, SAFE_WEIGHTS_NAME, is_accelerate_available, is_flax_available, is_tf_available, is_torch_bf16_available_on_device, is_torch_fp16_available_on_device, is_torch_fx_available, is_torch_sdpa_available, ) from transformers.utils.generic import ContextManagers, ModelOutput if is_accelerate_available(): from accelerate.utils import compute_module_sizes if is_torch_available(): import torch import torch.nn.functional as F from safetensors.torch import load_file as safe_load_file from safetensors.torch import save_file as safe_save_file from torch import nn from transformers import MODEL_MAPPING, AdaptiveEmbedding from transformers.cache_utils import Cache, DynamicCache from 
transformers.modeling_utils import load_state_dict, no_init_weights from transformers.pytorch_utils import id_tensor_storage if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp from tests.utils.test_modeling_flax_utils import check_models_equal from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_fx_available(): from transformers.utils.fx import _FX_SUPPORTED_MODELS_WITH_KV_CACHE, symbolic_trace if is_deepspeed_available(): import deepspeed def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init def _mock_init_weights(self, module): for name, param in module.named_parameters(recurse=False): # Use the first letter of the name to get a value and go from a <> -13 to z <> 12 value = ord(name[0].lower()) - 110 param.data.fill_(value) def _mock_all_init_weights(self): # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) import transformers.modeling_utils if transformers.modeling_utils._init_weights: for module in self.modules(): module._is_hf_initialized = False # Initialize weights self.apply(self._initialize_weights) # Tie weights should be skipped when not initializing all weights # since from_pretrained(...) calls tie weights anyways self.tie_weights() @contextmanager def _deepspeed_zero3(ds_config): dschf = HfDeepSpeedConfig(ds_config) try: yield dschf finally: unset_hf_deepspeed_config() def sdpa_kernel(enable_flash, enable_math, enable_mem_efficient): if version.parse(torch.__version__).release < version.parse("2.3").release: return torch.backends.cuda.sdp_kernel( enable_flash=enable_flash, enable_math=enable_math, enable_mem_efficient=enable_mem_efficient ) backends = [] if enable_flash: backends += [torch.nn.attention.SDPBackend.FLASH_ATTENTION] if enable_math: backends += [torch.nn.attention.SDPBackend.MATH] if enable_mem_efficient: backends += [torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION] return torch.nn.attention.sdpa_kernel(backends) @require_torch class ModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () fx_compatible = False test_torchscript = True test_pruning = True test_resize_embeddings = True test_resize_position_embeddings = False test_head_masking = True test_mismatched_shapes = True test_missing_keys = True test_model_parallel = False # Used in `check_training_gradient_checkpointing` to NOT check all params having gradient (e.g. 
for some MOE models) test_all_params_have_gradient = True is_encoder_decoder = False has_attentions = True _is_composite = False model_split_percents = [0.5, 0.7, 0.9] def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = copy.deepcopy(inputs_dict) if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict = { k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous() if isinstance(v, torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } elif model_class.__name__ in get_values(MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES): inputs_dict.pop("attention_mask") elif model_class.__name__ == MODEL_FOR_PRETRAINING_MAPPING_NAMES["hiera"]: config = self.model_tester.get_config() mask_spatial_shape = [ i // s // ms for i, s, ms in zip(config.image_size, config.patch_stride, config.masked_unit_size) ] num_windows = math.prod(mask_spatial_shape) torch.manual_seed(0) inputs_dict["noise"] = torch.rand(self.model_tester.batch_size, num_windows) if return_labels: if model_class.__name__ in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) elif model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) elif model_class.__name__ in [ *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES), *get_values(MODEL_FOR_MASKED_LM_MAPPING_NAMES), *get_values(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES), *get_values(MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES), ]: inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES): num_patches = self.model_tester.image_size // self.model_tester.patch_size inputs_dict["bool_masked_pos"] = torch.zeros( (self.model_tester.batch_size, num_patches**2), dtype=torch.long, device=torch_device ) elif model_class.__name__ in get_values(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = torch.zeros( [self.model_tester.batch_size, height, width], device=torch_device ).long() return inputs_dict def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_save_load(out1, out2): # make sure we don't have nans out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_2 = out_2[~np.isneginf(out_2)] out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 out_1 = out_1[~np.isneginf(out_1)] max_diff = np.amax(np.abs(out_1 - out_2)) 
self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) def test_from_pretrained_no_checkpoint(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) state_dict = model.state_dict() new_model = model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_keep_in_fp32_modules(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._keep_in_fp32_modules is None: self.skipTest(reason="Model class has no _keep_in_fp32_modules attribute defined") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16) for name, param in model.named_parameters(): if any(n in model_class._keep_in_fp32_modules for n in name.split(".")): self.assertTrue(param.dtype == torch.float32) else: self.assertTrue(param.dtype == torch.float16, name) def test_save_load_keys_to_ignore_on_save(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) _keys_to_ignore_on_save = getattr(model, "_keys_to_ignore_on_save", None) if _keys_to_ignore_on_save is None: continue # check the keys are in the original state_dict for k in _keys_to_ignore_on_save: self.assertIn(k, model.state_dict().keys(), "\n".join(model.state_dict().keys())) # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) output_model_file = os.path.join(tmpdirname, SAFE_WEIGHTS_NAME) state_dict_saved = safe_load_file(output_model_file) for k in _keys_to_ignore_on_save: self.assertNotIn(k, state_dict_saved.keys(), "\n".join(state_dict_saved.keys())) # Test we can load the state dict in the model, necessary for the checkpointing API in Trainer. 
load_result = model.load_state_dict(state_dict_saved, strict=False) keys_to_ignore = set(model._keys_to_ignore_on_save) if hasattr(model, "_tied_weights_keys"): keys_to_ignore.update(set(model._tied_weights_keys)) self.assertTrue(len(load_result.missing_keys) == 0 or set(load_result.missing_keys) == keys_to_ignore) self.assertTrue(len(load_result.unexpected_keys) == 0) def test_gradient_checkpointing_backward_compatibility(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue config.gradient_checkpointing = True model = model_class(config) self.assertTrue(model.is_gradient_checkpointing) def test_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue # at init model should have gradient checkpointing disabled model = model_class(config) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.gradient_checkpointing_enable() self.assertTrue(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to True for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertTrue( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to True" ) # check disable works model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to False for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertFalse( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to False" ) def test_peft_gradient_checkpointing_enable_disable(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if not model_class.supports_gradient_checkpointing: continue # at init model should have gradient checkpointing disabled model = model_class(config) self.assertFalse(model.is_gradient_checkpointing) # check enable works model._hf_peft_config_loaded = True try: model.gradient_checkpointing_enable() except NotImplementedError: continue self.assertTrue(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to True for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertTrue( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to True" ) # check disable works model.gradient_checkpointing_disable() self.assertFalse(model.is_gradient_checkpointing) # Loop over all modules and check that relevant modules have gradient_checkpointing set to False for n, m in model.named_modules(): if hasattr(m, "gradient_checkpointing"): self.assertFalse( m.gradient_checkpointing, f"Module {n} does not have gradient_checkpointing set to False" ) @is_flaky(description="low likelihood of failure, reason not yet discovered") def test_save_load_fast_init_from_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: self.skipTest(reason=f"{config.__class__.__name__} not in MODEL_MAPPING") base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] 
for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(model_class): pass model_class_copy = CopyClass # make sure that all keys are expected for test model_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless model_class_copy._init_weights = _mock_init_weights model_class_copy.init_weights = _mock_all_init_weights model = base_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = model_class_copy.from_pretrained(tmpdirname) model_slow_init = model_class_copy.from_pretrained(tmpdirname, _fast_init=False) # Before we test anything for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = (model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key]).sum().item() else: max_diff = (model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @slow @require_accelerate @mark.accelerate_tests def test_save_load_low_cpu_mem_usage(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as saved_model_path: for model_class in self.all_model_classes: model_to_save = model_class(config) model_to_save.save_pretrained(saved_model_path) self._check_save_load_low_cpu_mem_usage(model_class, saved_model_path) @slow @require_accelerate @mark.accelerate_tests def test_save_load_low_cpu_mem_usage_checkpoints(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() with tempfile.TemporaryDirectory() as saved_model_path: for model_class in self.all_model_classes: model_to_save = model_class(config) model_to_save.config.save_pretrained(saved_model_path) torch.save(model_to_save.state_dict(), os.path.join(saved_model_path, "pytorch_model.bin")) self._check_save_load_low_cpu_mem_usage(model_class, saved_model_path) @slow @require_accelerate @mark.accelerate_tests def test_save_load_low_cpu_mem_usage_no_safetensors(self): with tempfile.TemporaryDirectory() as saved_model_path: for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model_to_save = model_class(config) model_to_save.save_pretrained(saved_model_path, safe_serialization=False) self._check_save_load_low_cpu_mem_usage(model_class, saved_model_path) def _check_save_load_low_cpu_mem_usage(self, model_class, saved_model_path): from accelerate.utils.modeling import named_module_tensors # Load the low usage and the normal models. model_low_usage, loading_info = model_class.from_pretrained( saved_model_path, low_cpu_mem_usage=True, output_loading_info=True, ) model_non_low_usage = model_class.from_pretrained(saved_model_path) # Check that there were no missing keys. 
self.assertEqual(loading_info["missing_keys"], []) # The low_cpu_mem_usage=True causes the model params to be initialized with device=meta, and then # subsequently loaded with the correct values and onto the correct device. We check if there are any # remaining params that were not properly loaded. for name, tensor in named_module_tensors(model_low_usage, recurse=True): self.assertNotEqual( tensor.device, torch.device("meta"), "Tensor '" + name + "' has not been properly loaded and has device=meta.", ) # Check that the parameters are equal. for p1, p2 in zip(model_low_usage.parameters(), model_non_low_usage.parameters()): self.assertEqual(p1.data.ne(p2.data).sum(), 0) # Check that the state dict keys are equal. self.assertEqual(set(model_low_usage.state_dict().keys()), set(model_non_low_usage.state_dict().keys())) # Check that the shared tensors are equal. tensor_ptrs1 = collections.defaultdict(list) for name, tensor in model_low_usage.state_dict().items(): tensor_ptrs1[id_tensor_storage(tensor)].append(name) tied_params1 = [names for _, names in tensor_ptrs1.items() if len(names) > 1] tensor_ptrs2 = collections.defaultdict(list) for name, tensor in model_non_low_usage.state_dict().items(): tensor_ptrs2[id_tensor_storage(tensor)].append(name) tied_params2 = [names for _, names in tensor_ptrs2.items() if len(names) > 1] self.assertEqual(tied_params1, tied_params2) def test_save_load_fast_init_to_base(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: self.skipTest(reason=f"{config.__class__.__name__} not in MODEL_MAPPING") base_class = MODEL_MAPPING[config.__class__] if isinstance(base_class, tuple): base_class = base_class[0] for model_class in self.all_model_classes: if model_class == base_class: continue # make a copy of model class to not break future tests # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class class CopyClass(base_class): pass base_class_copy = CopyClass # make sure that all keys are expected for test base_class_copy._keys_to_ignore_on_load_missing = [] # make init deterministic, but make sure that # non-initialized weights throw errors nevertheless base_class_copy._init_weights = _mock_init_weights base_class_copy.init_weights = _mock_all_init_weights model = model_class(config) state_dict = model.state_dict() # this will often delete a single weight of a multi-weight module # to test an edge case random_key_to_del = random.choice(list(state_dict.keys())) del state_dict[random_key_to_del] # check that certain keys didn't get saved with the model with tempfile.TemporaryDirectory() as tmpdirname: model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) model_fast_init = base_class_copy.from_pretrained(tmpdirname) model_slow_init = base_class_copy.from_pretrained(tmpdirname, _fast_init=False) for key in model_fast_init.state_dict().keys(): if isinstance(model_slow_init.state_dict()[key], torch.BoolTensor): max_diff = torch.max( model_slow_init.state_dict()[key] ^ model_fast_init.state_dict()[key] ).item() else: max_diff = torch.max( torch.abs(model_slow_init.state_dict()[key] - model_fast_init.state_dict()[key]) ).item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_torch_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if config.__class__ not in MODEL_MAPPING: self.skipTest(reason=f"{config.__class__.__name__} not in MODEL_MAPPING") base_class = 
MODEL_MAPPING[config.__class__]

        if isinstance(base_class, tuple):
            base_class = base_class[0]

        for model_class in self.all_model_classes:
            if model_class == base_class:
                continue

            # make a copy of model class to not break future tests
            # from https://stackoverflow.com/questions/9541025/how-to-copy-a-python-class
            class CopyClass(base_class):
                pass

            base_class_copy = CopyClass

            # make sure that all keys are expected for test
            base_class_copy._keys_to_ignore_on_load_missing = []

            # make init deterministic, but make sure that
            # non-initialized weights throw errors nevertheless
            base_class_copy._init_weights = _mock_init_weights
            base_class_copy.init_weights = _mock_all_init_weights

            model = model_class(config)
            state_dict = model.state_dict()

            def check_equal(loaded):
                for key in state_dict.keys():
                    max_diff = torch.max(
                        state_dict[key] ^ loaded[key]
                        if isinstance(state_dict[key], torch.BoolTensor)
                        else torch.abs(state_dict[key] - loaded[key])
                    ).item()
                    self.assertLessEqual(max_diff, 1e-6, msg=f"{key} not identical")

            # save the state dict with both zipfile serialization settings and check it loads back identically
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pytorch_model.bin")
                torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=True)
                check_equal(load_state_dict(pt_checkpoint_path))
                torch.save(state_dict, pt_checkpoint_path, _use_new_zipfile_serialization=False)
                check_equal(load_state_dict(pt_checkpoint_path))

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_determinism(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_determinism(first, second):
            out_1 = first.cpu().numpy()
            out_2 = second.cpu().numpy()
            out_1 = out_1[~np.isnan(out_1)]
            out_2 = out_2[~np.isnan(out_2)]
            out_1 = out_1[~np.isneginf(out_1)]
            out_2 = out_2[~np.isneginf(out_2)]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
                second = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            if isinstance(first, tuple) and isinstance(second, tuple):
                for tensor1, tensor2 in zip(first, second):
                    check_determinism(tensor1, tensor2)
            else:
                check_determinism(first, second)

    def test_batching_equivalence(self):
        """
        Tests that the model supports batching and that the output is nearly the same for the same input in
        different batch sizes.
        (Why "nearly the same" not "exactly the same"?
Batching uses different matmul shapes, which often leads to different results: https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535) """ def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) elif isinstance(batched_object, dict): for batched_object_value, single_row_object_value in zip( batched_object.values(), single_row_object.values() ): recursive_check(batched_object_value, single_row_object_value, model_name, key) # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects elif batched_object is None or not isinstance(batched_object, torch.Tensor): return elif batched_object.dim() == 0: return # do not compare int or bool outputs as they are mostly computed with max/argmax/topk methods which are # very sensitive to the inputs (e.g. tiny differences may give totally different results) elif not torch.is_floating_point(batched_object): return else: # indexing the first element does not always work # e.g. models that output similarity scores of size (N, M) would need to index [0, 0] slice_ids = [slice(0, index) for index in single_row_object.shape] batched_row = batched_object[slice_ids] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) try: torch.testing.assert_close(batched_row, single_row_object, atol=1e-5, rtol=1e-5) except AssertionError as e: msg = f"Batched and Single row outputs are not equal in {model_name} for key={key}.\n\n" msg += str(e) raise AssertionError(msg) set_model_tester_for_less_flaky_test(self) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ if hasattr(self.model_tester, "prepare_config_and_inputs_for_model_class"): config, batched_input = self.model_tester.prepare_config_and_inputs_for_model_class(model_class) batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() set_model_for_less_flaky_test(model) batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: # e.g. musicgen has inputs of size (bs*codebooks). 
# in most cases value.shape[0] == batch_size
                    single_batch_shape = value.shape[0] // batch_size
                    single_row_input[key] = value[:single_batch_shape]
                else:
                    single_row_input[key] = value

            with torch.no_grad():
                model_batched_output = model(**batched_input_prepared)
                model_row_output = model(**single_row_input)

            if isinstance(model_batched_output, torch.Tensor):
                model_batched_output = {"model_output": model_batched_output}
                model_row_output = {"model_output": model_row_output}

            for key in model_batched_output:
                # DETR starts from zero-init queries to decoder, leading to cos_similarity = `nan`
                if hasattr(self, "zero_init_hidden_state") and "decoder_hidden_states" in key:
                    model_batched_output[key] = model_batched_output[key][1:]
                    model_row_output[key] = model_row_output[key][1:]
                recursive_check(model_batched_output[key], model_row_output[key], model_name, key)

    def check_training_gradient_checkpointing(self, gradient_checkpointing_kwargs=None):
        if not self.model_tester.is_training:
            self.skipTest(reason="ModelTester is not configured to run training tests")

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                if (
                    model_class.__name__
                    in [
                        *get_values(MODEL_MAPPING_NAMES),
                        *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
                    ]
                    or not model_class.supports_gradient_checkpointing
                ):
                    # TODO (ydshieh): use `skipTest` once pytest-dev/pytest-subtests/pull/169 is merged
                    # self.skipTest(reason=f"`supports_gradient_checkpointing` is False for {model_class.__name__}.")
                    continue

                config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
                config.use_cache = False
                config.return_dict = True
                model = model_class(config)

                model.to(torch_device)
                model.gradient_checkpointing_enable(gradient_checkpointing_kwargs=gradient_checkpointing_kwargs)
                model.train()

                # unfreeze additional layers
                for p in model.parameters():
                    p.requires_grad_(True)

                optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

                inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
                loss = model(**inputs).loss
                loss.backward()
                optimizer.step()

                if self.test_all_params_have_gradient:
                    for k, v in model.named_parameters():
                        if v.requires_grad:
                            self.assertTrue(v.grad is not None, f"{k} in {model_class.__name__} has no gradient!")

    def test_training(self):
        if not self.model_tester.is_training:
            self.skipTest(reason="ModelTester is not configured to run training tests")

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        # Scenario - 1: default behaviour
        self.check_training_gradient_checkpointing()

    def test_training_gradient_checkpointing_use_reentrant(self):
        # Scenario - 2: with `use_reentrant=True` - this is the default value that is used in pytorch's
        # torch.utils.checkpoint.checkpoint
        self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": True})

    def test_training_gradient_checkpointing_use_reentrant_false(self):
        # Scenario - 3: with `use_reentrant=False` - pytorch suggests users use this value in
        # future releases: https://pytorch.org/docs/stable/checkpoint.html
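        # Note: the non-reentrant implementation recomputes activations through saved-tensor hooks
        # rather than a custom autograd Function, which is why PyTorch recommends it for new code
        # (it supports, e.g., keyword arguments and `torch.autograd.grad`).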
self.check_training_gradient_checkpointing(gradient_checkpointing_kwargs={"use_reentrant": False}) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits if model_class.__name__ in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), ]: correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True 
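            # With hidden states requested on top of attentions, the output tuple grows by the
            # hidden_states entries; the `out_len + added_hidden_states` check below verifies that
            # the attentions keep their expected position.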
inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_torchscript_simple(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_attentions = True self._create_and_check_torchscript(config, inputs_dict) @slow def test_torchscript_output_hidden_state(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True self._create_and_check_torchscript(config, inputs_dict) # This is copied from `torch/testing/_internal/jit_utils.py::clear_class_registry` def clear_torch_jit_class_registry(self): torch._C._jit_clear_class_registry() torch.jit._recursive.concrete_type_store = torch.jit._recursive.ConcreteTypeStore() # torch 1.8 has no `_clear_class_state` in `torch.jit._state` if hasattr(torch.jit._state, "_clear_class_state"): torch.jit._state._clear_class_state() def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to `False`") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: for attn_implementation in ["eager", "sdpa"]: if attn_implementation == "sdpa" and (not model_class._supports_sdpa or not is_torch_sdpa_available()): continue configs_no_init._attn_implementation = attn_implementation model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward main_input = inputs[main_input_name] attention_mask = inputs["attention_mask"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] outputs = model(main_input, attention_mask, decoder_input_ids, decoder_attention_mask) # `torchscript` doesn't work with outputs containing `Cache` object. However, #35235 makes # several models to use `Cache` by default instead of the legacy cache (tuple), and # their `torchscript` tests are failing. We won't support them anyway, but we still want to keep # the tests for encoder models like `BERT`. So we skip the checks if the model's output contains # a `Cache` object. 
if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (main_input, attention_mask, decoder_input_ids, decoder_attention_mask) ) elif "bbox" in inputs and "image" in inputs: # LayoutLMv2 requires additional inputs input_ids = inputs["input_ids"] bbox = inputs["bbox"] image = inputs["image"].tensor outputs = model(input_ids, bbox, image) if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (input_ids, bbox, image), check_trace=False ) # when traced model is checked, an error is produced due to name mangling elif "bbox" in inputs: # Bros requires additional inputs (bbox) input_ids = inputs["input_ids"] bbox = inputs["bbox"] outputs = model(input_ids, bbox) if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (input_ids, bbox), check_trace=False ) # when traced model is checked, an error is produced due to name mangling elif ( "pixel_values" in inputs and "prompt_pixel_values" in inputs and "prompt_masks" in inputs ): # SegGpt requires additional inputs pixel_values = inputs["pixel_values"] prompt_pixel_values = inputs["prompt_pixel_values"] prompt_masks = inputs["prompt_masks"] outputs = model(pixel_values, prompt_pixel_values, prompt_masks) if any(isinstance(x, Cache) for x in outputs): continue traced_model = torch.jit.trace( model, (pixel_values, prompt_pixel_values, prompt_masks), check_trace=False ) # when traced model is checked, an error is produced due to name mangling else: main_input = inputs[main_input_name] if model.config._attn_implementation == "sdpa": trace_input = {main_input_name: main_input} if "attention_mask" in inputs: trace_input["attention_mask"] = inputs["attention_mask"] else: self.skipTest(reason="testing SDPA without attention_mask is not supported") outputs = model(main_input, attention_mask=inputs["attention_mask"]) if any(isinstance(x, Cache) for x in outputs): continue # example_kwarg_inputs was introduced in torch==2.0, but it is fine here since SDPA has a requirement on torch>=2.1. 
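                            # Schematically (names taken from the prepared inputs above):
                            #   torch.jit.trace(model, example_kwarg_inputs={"input_ids": ..., "attention_mask": ...})
                            # i.e. the example inputs are bound by keyword rather than positionally.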
traced_model = torch.jit.trace(model, example_kwarg_inputs=trace_input)
                    else:
                        outputs = model(main_input)
                        if any(isinstance(x, Cache) for x in outputs):
                            continue
                        traced_model = torch.jit.trace(model, (main_input,))
                except RuntimeError:
                    self.fail("Couldn't trace module.")

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt")

                    try:
                        torch.jit.save(traced_model, pt_file_name)
                    except Exception:
                        self.fail("Couldn't save module.")

                    try:
                        loaded_model = torch.jit.load(pt_file_name)
                    except Exception:
                        self.fail("Couldn't load module.")

                model.to(torch_device)
                model.eval()

                loaded_model.to(torch_device)
                loaded_model.eval()

                model_state_dict = model.state_dict()
                loaded_model_state_dict = loaded_model.state_dict()

                non_persistent_buffers = {}
                for key in loaded_model_state_dict.keys():
                    if key not in model_state_dict.keys():
                        non_persistent_buffers[key] = loaded_model_state_dict[key]

                loaded_model_state_dict = {
                    key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers
                }

                self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys()))

                model_buffers = list(model.buffers())
                for non_persistent_buffer in non_persistent_buffers.values():
                    found_buffer = False
                    for i, model_buffer in enumerate(model_buffers):
                        if torch.equal(non_persistent_buffer, model_buffer):
                            found_buffer = True
                            break

                    self.assertTrue(found_buffer)
                    model_buffers.pop(i)

                models_equal = True
                for layer_name, p1 in model_state_dict.items():
                    if layer_name in loaded_model_state_dict:
                        p2 = loaded_model_state_dict[layer_name]
                        if p1.data.ne(p2.data).sum() > 0:
                            models_equal = False

                self.assertTrue(models_equal)

                # Avoid memory leak. Without this, each call increases RAM usage by ~20MB.
                # (Even with this call, there is still a memory leak of ~0.04MB.)
                self.clear_torch_jit_class_registry()

    def test_torch_fx(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torch_fx_tracing(config, inputs_dict)

    def test_torch_fx_output_loss(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self._create_and_check_torch_fx_tracing(config, inputs_dict, output_loss=True)

    def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False):
        if not is_torch_fx_available() or not self.fx_compatible:
            self.skipTest(
                f"Either torch.fx is not available, or the model type {config.model_type} is not compatible with torch.fx"
            )

        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        configs_no_init.return_dict = False

        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss)

            # We may want to test several inputs (various shapes, etc.).
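            # Besides the base inputs, variants with (non-)empty `past_key_values` and with
            # `inputs_embeds` in place of `input_ids` are appended below when the model supports them.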
inputs_to_test = [inputs]

            if model.config.is_encoder_decoder:
                model.config.use_cache = False  # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward
                labels = inputs.get("labels", None)
                input_names = [
                    "attention_mask",
                    "decoder_attention_mask",
                    "decoder_input_ids",
                    "input_features",
                    "input_ids",
                    "input_values",
                ]
                if labels is not None:
                    input_names.append("labels")
            else:
                input_names = [
                    "attention_mask",
                    "bbox",
                    "input_features",
                    "input_ids",
                    "input_values",
                    "inputs_embeds",
                    "pixel_values",
                    "token_type_ids",
                    "visual_feats",
                    "visual_pos",
                    "noise",
                ]

                labels = inputs.get("labels", None)
                start_positions = inputs.get("start_positions", None)
                end_positions = inputs.get("end_positions", None)
                if labels is not None:
                    input_names.append("labels")
                if start_positions is not None:
                    input_names.append("start_positions")
                if end_positions is not None:
                    input_names.append("end_positions")

                if model.config.model_type in _FX_SUPPORTED_MODELS_WITH_KV_CACHE:
                    input_names.append("past_key_values")

                    # Generally, `model_tester.prepare_config_and_inputs_for_common` does not seem to generate
                    # past-key-values inputs, so they are created here.
                    if "past_key_values" not in inputs:
                        batch_size = inputs[next(iter(inputs))].shape[0]
                        num_heads = model.config.num_attention_heads
                        head_dim = model.config.hidden_size // model.config.num_attention_heads

                        cache_shape = (batch_size, num_heads, 0, head_dim)
                        empty_pkv = tuple(
                            (
                                torch.rand(cache_shape, dtype=torch.float, device=torch_device),
                                torch.rand(cache_shape, dtype=torch.float, device=torch_device),
                            )
                            for i in range(model.config.num_hidden_layers)
                        )
                        empty_pkv = (
                            DynamicCache.from_legacy_cache(empty_pkv) if model_class._supports_cache_class else empty_pkv
                        )

                        cache_length = 9
                        cache_shape = (batch_size, num_heads, cache_length, head_dim)
                        non_empty_pkv = tuple(
                            (
                                torch.rand(cache_shape, dtype=torch.float, device=torch_device),
                                torch.rand(cache_shape, dtype=torch.float, device=torch_device),
                            )
                            for i in range(model.config.num_hidden_layers)
                        )
                        non_empty_pkv = (
                            DynamicCache.from_legacy_cache(non_empty_pkv)
                            if model_class._supports_cache_class
                            else non_empty_pkv
                        )

                        inps = copy.deepcopy(inputs_to_test[0])

                        inputs_to_test[0]["past_key_values"] = empty_pkv

                        inps["past_key_values"] = non_empty_pkv
                        inputs_to_test.append(inps)

                        past_mask = torch.ones(batch_size, cache_length, device=torch_device, dtype=torch.float)
                        inputs_to_test[1]["attention_mask"] = torch.cat(
                            (past_mask, inputs_to_test[1]["attention_mask"]), dim=1
                        )

            forward_parameters = inspect.signature(model.forward).parameters
            if "input_ids" in forward_parameters and "inputs_embeds" in forward_parameters:
                inps = copy.deepcopy(inputs_to_test[0])

                embedding_size = (
                    model.config.embedding_size
                    if getattr(model.config, "embedding_size", None) is not None
                    and model.config.model_type != "megatron-bert"
                    else model.config.hidden_size
                )

                if (
                    model.config.model_type in MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
                    and model.__class__.__name__
                    == MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES[model.config.model_type]
                ):
                    batch_size, num_choices, sequence_length = inputs["input_ids"].shape
                    shape = (batch_size, num_choices, sequence_length, embedding_size)
                elif inps["input_ids"].ndim == 2:
                    batch_size, sequence_length = inputs["input_ids"].shape
                    shape = (batch_size, sequence_length, embedding_size)
                else:
                    self.skipTest("Unknown case")

                del inps["input_ids"]
                inps["inputs_embeds"] = torch.rand(shape, dtype=torch.float, device=torch_device)
                inputs_to_test.append(inps)

            for inps in inputs_to_test:
                filtered_inputs = {k: v for (k, v) in inps.items() if k in input_names}
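                # Only the arguments listed in `input_names` become placeholders of the traced
                # graph; the remaining forward arguments are baked in as concrete values.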
input_names_to_trace = list(filtered_inputs.keys())

                if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and (
                    not hasattr(model.config, "problem_type") or model.config.problem_type is None
                ):
                    model.config.problem_type = "single_label_classification"

                model.config.use_cache = "past_key_values" in input_names_to_trace

                traced_model = symbolic_trace(model, input_names_to_trace)

                with torch.no_grad():
                    traced_output = traced_model(**filtered_inputs)
                    model_output = model(**filtered_inputs)

                def flatten_output(output):
                    flatten = []
                    for x in output:
                        if isinstance(x, (tuple, list)):
                            flatten += flatten_output(x)
                        elif not isinstance(x, torch.Tensor):
                            continue
                        else:
                            flatten.append(x)
                    return flatten

                model_output = flatten_output(model_output)
                traced_output = flatten_output(traced_output)
                num_outputs = len(model_output)

                for i in range(num_outputs):
                    self.assertTrue(
                        torch.allclose(model_output[i], traced_output[i]),
                        f"traced {i}th output doesn't match model {i}th output for {model_class}",
                    )

                # Avoid memory leak. Without this, each call increases RAM usage by ~20MB.
                # (Even with this call, there is still a memory leak of ~0.04MB.)
                self.clear_torch_jit_class_registry()

    def test_headmasking(self):
        if not self.test_head_masking:
            self.skipTest(reason="Model does not support head masking")

        global_rng.seed(42)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        global_rng.seed()

        inputs_dict["output_attentions"] = True
        config.output_hidden_states = True
        configs_no_init = _config_zero_init(config)  # To be sure we have no Nan
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            model.to(torch_device)
            model.eval()

            # Prepare head_mask
            # Set require_grad after having prepared the tensor to avoid error (leaf variable has been moved into the graph interior)
            head_mask = torch.ones(
                self.model_tester.num_hidden_layers,
                self.model_tester.num_attention_heads,
                device=torch_device,
            )
            head_mask[0, 0] = 0
            head_mask[-1, :-1] = 0
            head_mask.requires_grad_(requires_grad=True)
            inputs = self._prepare_for_class(inputs_dict, model_class).copy()
            inputs["head_mask"] = head_mask
            if model.config.is_encoder_decoder:
                signature = inspect.signature(model.forward)
                arg_names = [*signature.parameters.keys()]
                if "decoder_head_mask" in arg_names:  # necessary differentiation because of T5 model
                    inputs["decoder_head_mask"] = head_mask
                if "cross_attn_head_mask" in arg_names:
                    inputs["cross_attn_head_mask"] = head_mask
            outputs = model(**inputs, return_dict=True)

            # Test that we can get a gradient back for importance score computation
            output = sum(t.sum() for t in outputs[0])
            output = output.sum()
            output.backward()
            multihead_outputs = head_mask.grad

            self.assertIsNotNone(multihead_outputs)
            self.assertEqual(len(multihead_outputs), self.model_tester.num_hidden_layers)

            def check_attentions_validity(attentions):
                # Remove NaN
                for t in attentions:
                    self.assertLess(
                        torch.sum(torch.isnan(t)), t.numel() / 4
                    )  # Check we don't have more than 25% NaNs (arbitrary)
                attentions = [
                    t.masked_fill(torch.isnan(t), 0.0) for t in attentions
                ]  # remove them (the test is less complete)

                self.assertAlmostEqual(attentions[0][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertNotEqual(attentions[0][..., -1, :, :].flatten().sum().item(), 0.0)
                if len(attentions) > 2:  # encoder-decoder models have only 2 layers in each module
                    self.assertNotEqual(attentions[1][..., 0, :, :].flatten().sum().item(), 0.0)
                self.assertAlmostEqual(attentions[-1][..., -2, :, :].flatten().sum().item(), 0.0)
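                # head_mask above zeroes head 0 of the first layer and all but the last head of the
                # last layer, so masked attention slices must sum to ~0 while unmasked ones stay non-zero.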
self.assertNotEqual(attentions[-1][..., -1, :, :].flatten().sum().item(), 0.0) if model.config.is_encoder_decoder: check_attentions_validity(outputs.encoder_attentions) check_attentions_validity(outputs.decoder_attentions) check_attentions_validity(outputs.cross_attentions) else: check_attentions_validity(outputs.attentions) def test_head_pruning(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? # self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_pretrained(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False model = model_class(config=config) model.to(torch_device) model.eval() heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } model.prune_heads(heads_to_prune) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? # self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_save_load_from_config_init(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = { 0: list(range(1, self.model_tester.num_attention_heads)), -1: [0], } config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], 1) # TODO: To have this check, we will need at least 3 layers. Do we really need it? 
# self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads) self.assertEqual(attentions[-1].shape[-3], self.model_tester.num_attention_heads - 1) def test_head_pruning_integration(self): if not self.test_pruning: self.skipTest(reason="Pruning is not activated") for model_class in self.all_model_classes: ( config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if "head_mask" in inputs_dict: del inputs_dict["head_mask"] inputs_dict["output_attentions"] = True config.output_hidden_states = False heads_to_prune = {1: [1, 2]} config.pruned_heads = heads_to_prune model = model_class(config=config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) with tempfile.TemporaryDirectory() as temp_dir_name: model.save_pretrained(temp_dir_name) model = model_class.from_pretrained(temp_dir_name) model.to(torch_device) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 0) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) heads_to_prune = {0: [0], 1: [1, 2]} model.prune_heads(heads_to_prune) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs[-1] self.assertEqual(attentions[0].shape[-3], self.model_tester.num_attention_heads - 1) self.assertEqual(attentions[1].shape[-3], self.model_tester.num_attention_heads - 2) self.assertDictEqual(model.config.pruned_heads, {0: [0], 1: [1, 2]}) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: seq_length = seq_length * self.model_tester.chunk_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] 
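            # same check as above, but driven by the config flag instead of the per-call argument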
config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0]

        if config.is_encoder_decoder:
            # Seq2Seq models
            encoder_hidden_states = outputs.encoder_hidden_states[0]
            encoder_hidden_states.retain_grad()

            decoder_hidden_states = outputs.decoder_hidden_states[0]
            decoder_hidden_states.retain_grad()

            if self.has_attentions:
                encoder_attentions = outputs.encoder_attentions[0]
                encoder_attentions.retain_grad()

                decoder_attentions = outputs.decoder_attentions[0]
                decoder_attentions.retain_grad()

                cross_attentions = outputs.cross_attentions[0]
                cross_attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(encoder_hidden_states.grad)
            self.assertIsNotNone(decoder_hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(encoder_attentions.grad)
                self.assertIsNotNone(decoder_attentions.grad)
                self.assertIsNotNone(cross_attentions.grad)
        else:
            # Encoder-/Decoder-only models
            hidden_states = outputs.hidden_states[0]
            hidden_states.retain_grad()

            if self.has_attentions:
                attentions = outputs.attentions[0]
                attentions.retain_grad()

            output.flatten()[0].backward(retain_graph=True)

            self.assertIsNotNone(hidden_states.grad)

            if self.has_attentions:
                self.assertIsNotNone(attentions.grad)

    def test_feed_forward_chunking(self):
        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            torch.manual_seed(0)
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_no_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]

            torch.manual_seed(0)
            config.chunk_size_feed_forward = 1
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            hidden_states_with_chunk = model(**self._prepare_for_class(inputs_dict, model_class))[0]
            torch.testing.assert_close(hidden_states_no_chunk, hidden_states_with_chunk, rtol=1e-3, atol=1e-3)

    def test_resize_position_vector_embeddings(self):
        if not self.test_resize_position_embeddings:
            self.skipTest(reason="Model does not have position embeddings")

        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            model = model_class(config)
            model.to(torch_device)

            if self.model_tester.is_training is False:
                model.eval()

            max_position_embeddings = config.max_position_embeddings

            # Retrieve the embeddings and clone them
            if model.config.is_encoder_decoder:
                encoder_model_embed, decoder_model_embed = model.get_position_embeddings()
                encoder_cloned_embeddings = encoder_model_embed.weight.clone()
                decoder_cloned_embeddings = decoder_model_embed.weight.clone()
            else:
                model_embed = model.get_position_embeddings()
                cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the position embeddings with a larger max_position_embeddings increases
            # the model's position embeddings size
            model.resize_position_embeddings(max_position_embeddings + 10)
            self.assertEqual(model.config.max_position_embeddings, max_position_embeddings + 10)
# Check that it actually resizes the embeddings matrix
            if model.config.is_encoder_decoder:
                encoder_model_embed, decoder_model_embed = model.get_position_embeddings()
                self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] + 10)
                self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] + 10)
            else:
                model_embed = model.get_position_embeddings()
                self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that resizing the position embeddings with a smaller max_position_embeddings decreases
            # the model's max_position_embeddings
            model.resize_position_embeddings(max_position_embeddings - 5)
            self.assertEqual(model.config.max_position_embeddings, max_position_embeddings - 5)

            # Check that it actually resizes the embeddings matrix
            if model.config.is_encoder_decoder:
                encoder_model_embed, decoder_model_embed = model.get_position_embeddings()
                self.assertEqual(encoder_model_embed.weight.shape[0], encoder_cloned_embeddings.shape[0] - 5)
                self.assertEqual(decoder_model_embed.weight.shape[0], decoder_cloned_embeddings.shape[0] - 5)
            else:
                model_embed = model.get_position_embeddings()
                self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 5)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            model(**self._prepare_for_class(inputs_dict, model_class))

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True

            if model.config.is_encoder_decoder:
                for p1, p2 in zip(encoder_cloned_embeddings, encoder_model_embed.weight):
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False
                for p1, p2 in zip(decoder_cloned_embeddings, decoder_model_embed.weight):
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False
            else:
                for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                    if p1.data.ne(p2.data).sum() > 0:
                        models_equal = False

            self.assertTrue(models_equal)

    def test_resize_tokens_embeddings(self):
        if not self.test_resize_embeddings:
            self.skipTest(reason="test_resize_embeddings is set to `False`")

        (
            original_config,
            inputs_dict,
        ) = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict.pop("labels", None)

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            if is_deepspeed_zero3_enabled():
                with deepspeed.zero.Init():
                    model = model_class(config)
            else:
                model = model_class(config)
                model.to(torch_device)

            model_embed_pre_resize = model.get_input_embeddings()
            type_model_embed_pre_resize = type(model_embed_pre_resize)

            if self.model_tester.is_training is False:
                model.eval()

            model_vocab_size = config.get_text_config().vocab_size
            # Retrieve the embeddings and clone them
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            new_model_vocab_size = model.config.get_text_config().vocab_size
            self.assertEqual(new_model_vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the type of the embeddings returned after resizing is the same as the type of the input
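            # (e.g. a model using a custom embedding subclass should get that same subclass back,
            # not a plain `nn.Embedding`)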
type_model_embed_post_resize = type(model_embed)
            self.assertEqual(type_model_embed_pre_resize, type_model_embed_post_resize)
            # Check that added embeddings mean is close to the old embeddings mean
            if is_deepspeed_zero3_enabled():
                with deepspeed.zero.GatheredParameters(model_embed.weight, modifier_rank=None):
                    old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
                    new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
            else:
                old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
                new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
            torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            if not is_deepspeed_zero3_enabled():
                # A distributed launcher is needed for the forward pass when deepspeed is enabled
                model_inputs = self._prepare_for_class(inputs_dict, model_class)
                model(**model_inputs)

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size - 15)
            new_model_vocab_size = model.config.get_text_config().vocab_size
            self.assertEqual(new_model_vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15)

            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)

            # make sure that decoder_input_ids are resized as well
            if not is_deepspeed_zero3_enabled():
                # A distributed launcher is needed for the forward pass when deepspeed is enabled
                if "decoder_input_ids" in inputs_dict:
                    inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
                model_inputs = self._prepare_for_class(inputs_dict, model_class)
                model(**model_inputs)

            # Check that adding and removing tokens has not modified the first part of the embedding matrix.
            models_equal = True
            for p1, p2 in zip(cloned_embeddings, model_embed.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    models_equal = False

            self.assertTrue(models_equal)

            del model
            del config
            # Copy again. config changed with embedding resizing (`vocab_size` changed)
config = copy.deepcopy(original_config)
            if is_deepspeed_zero3_enabled():
                with deepspeed.zero.Init():
                    model = model_class(config)
            else:
                model = model_class(config)
                model.to(torch_device)

            model_vocab_size = config.get_text_config().vocab_size
            model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1)
            new_model_vocab_size = model.config.get_text_config().vocab_size
            self.assertEqual(new_model_vocab_size, model_vocab_size + 10)

            model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64)
            new_model_vocab_size = model.config.get_text_config().vocab_size
            self.assertEqual(model_embed.weight.shape[0] % 64, 0)
            self.assertEqual(model_embed.weight.shape[0], new_model_vocab_size)
            self.assertEqual(new_model_vocab_size, model.vocab_size)

            model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64)
            self.assertEqual(model_embed.weight.shape[0] % 64, 0)

            # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size
            target_dimension = 128
            model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64)
            self.assertEqual(model_embed.weight.shape[0], target_dimension)

            # NOTE: the expected message below is matched against the library's error string verbatim
            with self.assertRaisesRegex(
                ValueError,
                "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. Please make sure to pass an integer",
            ):
                model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3)

            # Test when `vocab_size` is smaller than `hidden_size`.
            del model
            del config
            # Copy again. config changed with embedding resizing (`vocab_size` changed)
            config = copy.deepcopy(original_config)
            config.vocab_size = 4
            config.pad_token_id = 3
            if is_deepspeed_zero3_enabled():
                with deepspeed.zero.Init():
                    model = model_class(config)
            else:
                model = model_class(config)
                model.to(torch_device)

            model_vocab_size = config.get_text_config().vocab_size
            # Retrieve the embeddings and clone them
            model_embed = model.resize_token_embeddings(model_vocab_size)
            cloned_embeddings = model_embed.weight.clone()

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_embed = model.resize_token_embeddings(model_vocab_size + 10)
            new_model_vocab_size = model.config.get_text_config().vocab_size
            self.assertEqual(new_model_vocab_size, model_vocab_size + 10)
            # Check that it actually resizes the embeddings matrix
            self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
            # Check that the type of the embeddings returned after resizing is the same as the type of the input
            type_model_embed_post_resize = type(model_embed)
            self.assertEqual(type_model_embed_pre_resize, type_model_embed_post_resize)
            # Check that added embeddings mean is close to the old embeddings mean
            if is_deepspeed_zero3_enabled():
                with deepspeed.zero.GatheredParameters(model_embed.weight, modifier_rank=None):
                    old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
                    new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
            else:
                old_embeddings_mean = torch.mean(model_embed.weight.data[:-10, :], axis=0)
                new_embeddings_mean = torch.mean(model_embed.weight.data[-10:, :], axis=0)
            torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)

    @require_deepspeed
    @require_torch_accelerator
    def test_resize_tokens_embeddings_with_deepspeed(self):
        ds_config = {
            "zero_optimization": {
                "stage": 3,
                "offload_param": {"device": "cpu", "pin_memory": True},
            },
        }
        with _deepspeed_zero3(ds_config):
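            # Re-runs the resize test under ZeRO stage 3 (with CPU parameter offload), so the
            # gather/partition code paths in `resize_token_embeddings` are exercised as well.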
self.test_resize_tokens_embeddings()

    @require_deepspeed
    @require_torch_multi_gpu
    def test_resize_tokens_embeddings_with_deepspeed_multi_gpu(self):
        ds_config = {
            "zero_optimization": {
                "stage": 3,
            },
        }
        with _deepspeed_zero3(ds_config):
            self.test_resize_tokens_embeddings()

    def test_resize_embeddings_untied(self):
        if not self.test_resize_embeddings:
            self.skipTest(reason="test_resize_embeddings is set to `False`")

        original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        original_config.tie_word_embeddings = False
        inputs_dict.pop("labels", None)

        # if the model cannot untie its embeddings -> skip the test
        if original_config.tie_word_embeddings:
            self.skipTest(reason="Model cannot untie embeddings")

        for model_class in self.all_model_classes:
            config = copy.deepcopy(original_config)
            if is_deepspeed_zero3_enabled():
                with deepspeed.zero.Init():
                    model = model_class(config)
            else:
                model = model_class(config).to(torch_device)

            # if no output embeddings -> skip the test
            if model.get_output_embeddings() is None:
                continue

            # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size
            model_vocab_size = config.get_text_config().vocab_size
            model.resize_token_embeddings(model_vocab_size + 10)
            new_model_vocab_size = model.config.get_text_config().vocab_size
            self.assertEqual(new_model_vocab_size, model_vocab_size + 10)
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            if not is_deepspeed_zero3_enabled():
                # A distributed launcher is needed for the forward pass when deepspeed is enabled
                model(**self._prepare_for_class(inputs_dict, model_class))

            # Test multivariate resizing.
            model.resize_token_embeddings(model_vocab_size + 10)
            output_embeds = model.get_output_embeddings()
            # Check that added embeddings mean is close to the old embeddings mean
            if is_deepspeed_zero3_enabled():
                with deepspeed.zero.GatheredParameters(output_embeds.weight, modifier_rank=None):
                    old_embeddings_mean = torch.mean(output_embeds.weight.data[:-10, :], axis=0)
                    new_embeddings_mean = torch.mean(output_embeds.weight.data[-10:, :], axis=0)
            else:
                old_embeddings_mean = torch.mean(output_embeds.weight.data[:-10, :], axis=0)
                new_embeddings_mean = torch.mean(output_embeds.weight.data[-10:, :], axis=0)
            torch.testing.assert_close(old_embeddings_mean, new_embeddings_mean, rtol=1e-3, atol=1e-3)
            # check that the old bias mean is close to the added bias mean
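            # (mirrors the weight check above: newly added bias entries are expected to be
            # initialized around the mean of the pre-existing ones)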
if output_embeds.bias is not None:
                if is_deepspeed_zero3_enabled():
                    with deepspeed.zero.GatheredParameters(output_embeds.bias, modifier_rank=None):
                        old_bias_mean = torch.mean(output_embeds.bias.data[:-10], axis=0)
                        new_bias_mean = torch.mean(output_embeds.bias.data[-10:], axis=0)
                else:
                    old_bias_mean = torch.mean(output_embeds.bias.data[:-10], axis=0)
                    new_bias_mean = torch.mean(output_embeds.bias.data[-10:], axis=0)

                torch.testing.assert_close(old_bias_mean, new_bias_mean, rtol=1e-5, atol=1e-5)

            # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size
            model.resize_token_embeddings(model_vocab_size - 15)
            new_model_vocab_size = model.config.get_text_config().vocab_size
            self.assertEqual(new_model_vocab_size, model_vocab_size - 15)
            # Check that it actually resizes the embeddings matrix
            output_embeds = model.get_output_embeddings()
            self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15)
            # Check bias if present
            if output_embeds.bias is not None:
                self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            # Input ids should be clamped to the maximum size of the vocabulary
            inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            if "decoder_input_ids" in inputs_dict:
                inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1)
            # Check that the model can still do a forward pass successfully (every parameter should be resized)
            if not is_deepspeed_zero3_enabled():
                # A distributed launcher is needed for the forward pass when deepspeed is enabled
                model(**self._prepare_for_class(inputs_dict, model_class))

    @require_deepspeed
    @require_torch_accelerator
    def test_resize_embeddings_untied_with_deepspeed(self):
        ds_config = {
            "zero_optimization": {
                "stage": 3,
                "offload_param": {"device": "cpu", "pin_memory": True},
            },
        }
        with _deepspeed_zero3(ds_config):
            self.test_resize_embeddings_untied()

    @require_deepspeed
    @require_torch_multi_gpu
    def test_resize_embeddings_untied_with_deepspeed_multi_gpu(self):
        ds_config = {
            "zero_optimization": {
                "stage": 3,
            },
        }
        with _deepspeed_zero3(ds_config):
            self.test_resize_embeddings_untied()

    def test_model_get_set_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding, AdaptiveEmbedding))

            new_input_embedding_layer = nn.Embedding(10, 10)
            model.set_input_embeddings(new_input_embedding_layer)
            self.assertEqual(model.get_input_embeddings(), new_input_embedding_layer)

            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_model_main_input_name(self):
        for model_class in self.all_model_classes:
            model_signature = inspect.signature(getattr(model_class, "forward"))
            # The main input is the name of the argument after `self`
            observed_main_input_name = list(model_signature.parameters.keys())[1]
            self.assertEqual(model_class.main_input_name, observed_main_input_name)

    def test_correct_missing_keys(self):
        if not self.test_missing_keys:
            self.skipTest(reason="test_missing_keys is set to `False`")
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            base_model_prefix = model.base_model_prefix

            if hasattr(model, base_model_prefix):
                extra_params = {k: v for k, v in model.named_parameters() if not k.startswith(base_model_prefix)}
extra_params.update({k: v for k, v in model.named_buffers() if not k.startswith(base_model_prefix)})
                # Some models define this as None
                if model._keys_to_ignore_on_load_missing:
                    for key in model._keys_to_ignore_on_load_missing:
                        extra_params.pop(key, None)

                if not extra_params:
                    # In that case, we *are* on a head model, but every
                    # single key is not actual parameters and this is
                    # tested in `test_tied_model_weights_key_ignore` test.
                    continue

                with tempfile.TemporaryDirectory() as temp_dir_name:
                    model.base_model.save_pretrained(temp_dir_name)
                    model, loading_info = model_class.from_pretrained(temp_dir_name, output_loading_info=True)
                    self.assertGreater(len(loading_info["missing_keys"]), 0, model.__class__.__name__)

    def test_tie_model_weights(self):
        if not self.test_torchscript:
            self.skipTest(reason="test_torchscript is set to `False`")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_same_values(layer_1, layer_2):
            equal = True
            for p1, p2 in zip(layer_1.weight, layer_2.weight):
                if p1.data.ne(p2.data).sum() > 0:
                    equal = False
            return equal

        for model_class in self.all_model_classes:
            config.torchscript = True
            model_not_tied = model_class(config)
            if model_not_tied.get_output_embeddings() is None:
                continue

            config_tied = copy.deepcopy(config)
            config_tied.torchscript = False
            model_tied = model_class(config_tied)
            params_tied = list(model_tied.parameters())
            # Check that the embedding layer and decoding layer are the same in size and in value
            # self.assertTrue(check_same_values(embeddings, decoding))

            # Check that after resize they remain tied.
            vocab_size = config.get_text_config().vocab_size
            model_tied.resize_token_embeddings(vocab_size + 10)
            params_tied_2 = list(model_tied.parameters())
            self.assertEqual(len(params_tied_2), len(params_tied))

    @require_safetensors
    def test_can_use_safetensors(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model_tied = model_class(config)
            with tempfile.TemporaryDirectory() as d:
                try:
                    model_tied.save_pretrained(d, safe_serialization=True)
                except Exception as e:
                    raise Exception(f"Class {model_class.__name__} cannot be saved using safetensors: {e}")

                model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True)
                # Checking the state dicts are correct
                reloaded_state = model_reloaded.state_dict()
                for k, v in model_tied.state_dict().items():
                    self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded")
                    torch.testing.assert_close(
                        v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}"
                    )
                # Checking there was no complaint of missing weights
                self.assertEqual(infos["missing_keys"], [])

                # Checking the tensor sharing is correct
                ptrs = defaultdict(list)
                for k, v in model_tied.state_dict().items():
                    ptrs[v.data_ptr()].append(k)

                shared_ptrs = {k: v for k, v in ptrs.items() if len(v) > 1}

                for _, shared_names in shared_ptrs.items():
                    reloaded_ptrs = {reloaded_state[k].data_ptr() for k in shared_names}
                    self.assertEqual(
                        len(reloaded_ptrs),
                        1,
                        f"The shared pointers are incorrect, found different pointers for keys {shared_names}",
                    )

    def test_load_save_without_tied_weights(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.tie_word_embeddings = False
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as d:
                model.save_pretrained(d)

                model_reloaded, infos = model_class.from_pretrained(d, output_loading_info=True)
                # Checking the state dicts are correct
reloaded_state = model_reloaded.state_dict()
                for k, v in model.state_dict().items():
                    self.assertIn(k, reloaded_state, f"Key {k} is missing from reloaded")
                    torch.testing.assert_close(
                        v, reloaded_state[k], msg=lambda x: f"{model_class.__name__}: Tensor {k}: {x}"
                    )
                # Checking there was no complaint of missing weights
                self.assertEqual(infos["missing_keys"], [])

    def test_tied_weights_keys(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.get_text_config().tie_word_embeddings = True
        for model_class in self.all_model_classes:
            model_tied = model_class(config)

            ptrs = collections.defaultdict(list)
            for name, tensor in model_tied.state_dict().items():
                ptrs[id_tensor_storage(tensor)].append(name)

            # These are all the pointers of shared tensors.
            tied_params = [names for _, names in ptrs.items() if len(names) > 1]

            tied_weight_keys = model_tied._tied_weights_keys if model_tied._tied_weights_keys is not None else []
            # Detect we get a hit for each key
            for key in tied_weight_keys:
                is_tied_key = any(re.search(key, p) for group in tied_params for p in group)
                self.assertTrue(is_tied_key, f"{key} is not a tied weight key for {model_class}.")

            # Remove tied weight keys found from tied params -> there should only be one left per group after
            for key in tied_weight_keys:
                for i in range(len(tied_params)):
                    tied_params[i] = [p for p in tied_params[i] if re.search(key, p) is None]

            tied_params = [group for group in tied_params if len(group) > 1]
            self.assertListEqual(
                tied_params,
                [],
                f"Missing `_tied_weights_keys` for {model_class}: add all of {tied_params} except one.",
            )

    def test_model_weights_reload_no_missing_tied_weights(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.save_pretrained(tmp_dir)

                # We are nuking ALL weights on file, so every parameter should
                # yell on load. We're going to detect if we yell too much, or too little.
                placeholder_dict = {"tensor": torch.tensor([1, 2])}
                safe_save_file(placeholder_dict, os.path.join(tmp_dir, "model.safetensors"), metadata={"format": "pt"})
                model_reloaded, infos = model_class.from_pretrained(tmp_dir, output_loading_info=True)

                prefix = f"{model_reloaded.base_model_prefix}."
                params = dict(model_reloaded.named_parameters())
                params.update(dict(model_reloaded.named_buffers()))
                param_names = {k[len(prefix) :] if k.startswith(prefix) else k for k in params.keys()}

                missing_keys = set(infos["missing_keys"])

                extra_missing = missing_keys - param_names
                # Remove tied weights from extra missing: they are normally not warned as missing if their tied
                # counterpart is present but here there are no weights at all so we do get the warning.
                ptrs = collections.defaultdict(list)
                for name, tensor in model_reloaded.state_dict().items():
                    ptrs[id_tensor_storage(tensor)].append(name)
                tied_params = [names for _, names in ptrs.items() if len(names) > 1]
                for group in tied_params:
                    group = {k[len(prefix) :] if k.startswith(prefix) else k for k in group}
                    # We remove the group from extra_missing if not all weights from group are in it
                    if len(group - extra_missing) > 0:
                        extra_missing = extra_missing - set(group)

                self.assertEqual(
                    extra_missing,
                    set(),
                    f"This model {model_class.__name__} might be missing some `keys_to_ignore`: {extra_missing}. "
" f"For debugging, tied parameters are {tied_params}", ) missed_missing = param_names - missing_keys # Remove nonpersistent buffers from missed_missing buffers = [n for n, _ in model_reloaded.named_buffers()] nonpersistent_buffers = {n for n in buffers if n not in model_reloaded.state_dict()} nonpersistent_buffers = { k[len(prefix) :] if k.startswith(prefix) else k for k in nonpersistent_buffers } missed_missing = missed_missing - nonpersistent_buffers if model_reloaded._keys_to_ignore_on_load_missing is None: expected_missing = set() else: expected_missing = set(model_reloaded._keys_to_ignore_on_load_missing) self.assertEqual( missed_missing, expected_missing, f"This model {model_class.__name__} ignores keys {missed_missing} but they look like real" " parameters. If they are non persistent buffers make sure to instantiate them with" " `persistent=False`", ) def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return # model might return non-tensors objects (e.g. Cache class) elif isinstance(tuple_object, torch.Tensor): self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! def _make_attention_mask_non_null(self, inputs_dict): """Make sure no sequence has all zeros as attention mask""" for k in ["attention_mask", "encoder_attention_mask", "decoder_attention_mask"]: if k in inputs_dict: attention_mask = inputs_dict[k] # Make sure no all 0s attention masks - to avoid failure at this moment. # Put `1` at the beginning of sequences to make it still work when combining causal attention masks. # TODO: remove this line once a fix regarding large negative values for attention mask is done. attention_mask = torch.cat( [torch.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], dim=-1 ) # Here we make the first sequence with all 0s as attention mask. # Currently, this will fail for `TFWav2Vec2Model`. This is caused by the different large negative # values, like `1e-4`, `1e-9`, `1e-30` and `-inf` for attention mask across models/frameworks. # TODO: enable this block once the large negative values thing is cleaned up. # (see https://github.com/huggingface/transformers/issues/14859) # attention_mask = torch.cat( # [torch.zeros_like(attention_mask[:1], dtype=attention_mask.dtype), attention_mask[1:]], # dim=0 # ) inputs_dict[k] = attention_mask # Don't copy this method to model specific test file! # TODO: remove this method once the issues are all fixed! 
    def _postprocessing_to_ignore_test_cases(self, tf_outputs, pt_outputs, model_class):
        """For temporarily ignoring some failed test cases (issues to be fixed)"""

        tf_keys = {k for k, v in tf_outputs.items() if v is not None}
        pt_keys = {k for k, v in pt_outputs.items() if v is not None}

        key_differences = tf_keys.symmetric_difference(pt_keys)

        if model_class.__name__ in [
            "FlaubertWithLMHeadModel",
            "FunnelForPreTraining",
            "ElectraForPreTraining",
            "XLMWithLMHeadModel",
        ]:
            for k in key_differences:
                if k in ["loss", "losses"]:
                    tf_keys.discard(k)
                    pt_keys.discard(k)
        elif model_class.__name__.startswith("GPT2"):
            # `TFGPT2` has `past_key_values` as a tensor while `GPT2` has it as a tuple.
            tf_keys.discard("past_key_values")
            pt_keys.discard("past_key_values")

        # create new outputs from the remaining fields
        new_tf_outputs = type(tf_outputs)(**{k: tf_outputs[k] for k in tf_keys})
        new_pt_outputs = type(pt_outputs)(**{k: pt_outputs[k] for k in pt_keys})

        return new_tf_outputs, new_pt_outputs

    # Copied from tests.test_modeling_tf_common.TFModelTesterMixin.check_pt_tf_outputs
    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
        """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way.

        Args:
            model_class: The class of the model that is currently testing. For example, `TFBertModel`,
                `TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative
                error messages.
            name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
            attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element
                being a named field in the output.
        """

        self.assertEqual(type(name), str)
        if attributes is not None:
            self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")

        # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
        if isinstance(tf_outputs, ModelOutput):
            self.assertTrue(
                isinstance(pt_outputs, ModelOutput),
                f"{name}: `pt_outputs` should be an instance of `ModelOutput` when `tf_outputs` is",
            )

            # Don't copy this block to model specific test file!
            # TODO: remove this method and this line after issues are fixed
            tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)

            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
            pt_keys = [k for k, v in pt_outputs.items() if v is not None]

            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")

            # convert to the case of `tuple`
            # appending each key to the current (string) `name`
            attributes = tuple([f"{name}.{k}" for k in tf_keys])
            self.check_pt_tf_outputs(
                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
            )

        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
        elif type(tf_outputs) in [tuple, list]:
            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")

            if attributes is not None:
                # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
                self.assertEqual(
                    len(attributes),
                    len(tf_outputs),
                    f"{name}: The tuple `attributes` should have the same length as `tf_outputs`",
                )
            else:
                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name`
                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])

            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
                if isinstance(pt_output, DynamicCache):
                    pt_output = pt_output.to_legacy_cache()
                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)

        elif isinstance(tf_outputs, tf.Tensor):
            self.assertTrue(
                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is"
            )

            tf_outputs = tf_outputs.numpy()
            pt_outputs = pt_outputs.detach().to("cpu").numpy()

            self.assertEqual(
                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
            )

            # deal with NumPy's scalars to make replacing nan values by 0 work.
            if np.isscalar(tf_outputs):
                tf_outputs = np.array([tf_outputs])
                pt_outputs = np.array([pt_outputs])

            tf_nans = np.isnan(tf_outputs)
            pt_nans = np.isnan(pt_outputs)

            pt_outputs[tf_nans] = 0
            tf_outputs[tf_nans] = 0
            pt_outputs[pt_nans] = 0
            tf_outputs[pt_nans] = 0

            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
            self.assertLessEqual(
                max_diff,
                tol,
                f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}) for {model_class.__name__}",
            )
        else:
            raise ValueError(
                "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got"
                f" {type(tf_outputs)} instead."
            )

    def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
        tf_inputs_dict = {}
        for key, tensor in pt_inputs_dict.items():
            # skip key that does not exist in tf
            if isinstance(tensor, bool):
                tf_inputs_dict[key] = tensor
            elif key == "input_values":
                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
            elif key == "pixel_values":
                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
            elif key == "input_features":
                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
            # other general float inputs
            elif tensor.is_floating_point():
                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
            else:
                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.int32)

        return tf_inputs_dict

    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict)

        # send pytorch inputs to the correct device
        pt_inputs_dict = {
            k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items()
        }

        # send pytorch model to the correct device
        pt_model.to(torch_device)

        # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
        pt_model.eval()

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs_dict)
            tf_outputs = tf_model(tf_inputs_dict)

        # tf models returned loss is usually a tensor rather than a scalar.
        # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`)
        # Change it here to a scalar to match PyTorch models' loss
        tf_loss = getattr(tf_outputs, "loss", None)
        if tf_loss is not None:
            tf_outputs.loss = tf.math.reduce_mean(tf_loss)

        self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(pt_model))

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
        import transformers

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            tf_model_class_name = "TF" + model_class.__name__  # Add the "TF" at the beginning
            if not hasattr(transformers, tf_model_class_name):
                self.skipTest(reason="transformers does not have TF version of this model yet")

            # Output all for aggressive testing
            config.output_hidden_states = True
            config.output_attentions = self.has_attentions

            # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
            # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
            # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
            self._make_attention_mask_non_null(inputs_dict)

            tf_model_class = getattr(transformers, tf_model_class_name)

            pt_model = model_class(config).eval()
            tf_model = tf_model_class(config)

            pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            pt_inputs_dict_with_labels = self._prepare_for_class(
                inputs_dict,
                model_class,
                # Not all models accept "labels" in the forward pass (yet :) )
                return_labels=True if "labels" in inspect.signature(model_class.forward).parameters.keys() else False,
            )

            # make sure only those tf inputs that actually exist in the function args are forwarded
            tf_input_keys = set(inspect.signature(tf_model.call).parameters.keys())

            # remove all head masks
            tf_input_keys.discard("head_mask")
            tf_input_keys.discard("cross_attn_head_mask")
            tf_input_keys.discard("decoder_head_mask")

            pt_inputs_dict = {k: v for k, v in pt_inputs_dict.items() if k in tf_input_keys}
            pt_inputs_dict_with_labels = {k: v for k, v in pt_inputs_dict_with_labels.items() if k in tf_input_keys}

            # For some models (e.g. base models), there is no label returned.
            # Set the input dict to `None` to avoid checking outputs twice for the same input dicts.
if not set(pt_inputs_dict_with_labels.keys()).symmetric_difference(pt_inputs_dict.keys()): pt_inputs_dict_with_labels = None # Check we can load pt model in tf and vice-versa with model => model functions # Here requires `tf_inputs_dict` to build `tf_model` tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict) tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys ) pt_model = transformers.load_tf2_model_in_pytorch_model( pt_model, tf_model, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model( tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys ) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = transformers.load_tf2_checkpoint_in_pytorch_model( pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys ) # Original test: check without `labels` self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) # check with `labels` if pt_inputs_dict_with_labels: self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) 
elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." ) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): self.skipTest(reason="No Flax model exists for this class") # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): self.skipTest(reason="No Flax model exists for this class") # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v.to("cpu")) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained( tmpdirname, from_flax=True, attn_implementation=fx_model.config._attn_implementation ) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class.__name__ not in get_values(MODEL_MAPPING_NAMES): continue model = model_class(config) model.to(torch_device) model.eval() model_forward_args = inspect.signature(model.forward).parameters if "inputs_embeds" not in model_forward_args: self.skipTest(reason="This model doesn't use `inputs_embeds`") inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pad_token_id = config.pad_token_id if config.pad_token_id is not None else 1 wte = model.get_input_embeddings() if not self.is_encoder_decoder: input_ids = inputs["input_ids"] # some models infer position ids/attn mask differently when input ids # by check if pad_token let's make 
sure no padding is in input ids not_pad_token_id = pad_token_id + 1 if max(0, pad_token_id - 1) == 0 else pad_token_id - 1 input_ids[input_ids == pad_token_id] = not_pad_token_id del inputs["input_ids"] inputs_embeds = wte(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) encoder_input_ids[encoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) decoder_input_ids[decoder_input_ids == pad_token_id] = max(0, pad_token_id + 1) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) inputs_embeds = wte(encoder_input_ids) decoder_inputs_embeds = wte(decoder_input_ids) with torch.no_grad(): out_ids = model(input_ids=encoder_input_ids, decoder_input_ids=decoder_input_ids, **inputs)[0] out_embeds = model( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **inputs )[0] torch.testing.assert_close(out_embeds, out_ids) @require_non_xpu @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # some params shouldn't be scattered by nn.DataParallel # so just remove them if they are present. blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] for k in blacklist_non_batched_params: inputs_dict.pop(k, None) # move input tensors to cuda:O for k, v in inputs_dict.items(): if torch.is_tensor(v): inputs_dict[k] = v.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = nn.DataParallel(model) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch_gpu @require_torch_multi_gpu def test_model_parallelization(self): if not self.test_model_parallel: self.skipTest(reason="test_model_parallel is set to False") # a candidate for testing_utils def get_current_gpu_memory_use(): """returns a list of cuda memory allocations per GPU in MBs""" per_device_memory = [] for id in range(torch.cuda.device_count()): with torch.cuda.device(id): per_device_memory.append(torch.cuda.memory_allocated() >> 20) return per_device_memory # Needs a large model to see the difference. config = self.model_tester.get_large_model_config() for model_class in self.all_parallelizable_model_classes: torch.cuda.empty_cache() # 1. single gpu memory load + unload + memory measurements # Retrieve initial memory usage (can easily be ~0.6-1.5GB if cuda-kernels have been preloaded by previous tests) memory_at_start = get_current_gpu_memory_use() # Put model on device 0 and take a memory snapshot model = model_class(config) model.to("cuda:0") memory_after_model_load = get_current_gpu_memory_use() # The memory use on device 0 should be higher than it was initially. self.assertGreater(memory_after_model_load[0], memory_at_start[0]) del model gc.collect() torch.cuda.empty_cache() # 2. 
MP test
            # it's essential to re-calibrate the usage before the next stage
            memory_at_start = get_current_gpu_memory_use()

            # Spread model layers over multiple devices
            model = model_class(config)
            model.parallelize()
            memory_after_parallelization = get_current_gpu_memory_use()

            # Assert that the memory use on all devices is higher than it was when loaded only on CPU
            for n in range(len(model.device_map.keys())):
                self.assertGreater(memory_after_parallelization[n], memory_at_start[n])

            # Assert that the memory use of device 0 is lower than it was when the entire model was loaded on it
            self.assertLess(memory_after_parallelization[0], memory_after_model_load[0])

            # Assert that the memory use of device 1 is higher than it was when the entire model was loaded
            # on device 0 and device 1 wasn't used at all
            self.assertGreater(memory_after_parallelization[1], memory_after_model_load[1])

            del model
            gc.collect()
            torch.cuda.empty_cache()

    @require_torch_gpu
    @require_torch_multi_gpu
    def test_model_parallel_equal_results(self):
        if not self.test_model_parallel:
            self.skipTest(reason="test_model_parallel is set to False")

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_parallelizable_model_classes:
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)

            def cast_to_device(dictionary, device):
                output = {}
                for k, v in dictionary.items():
                    if isinstance(v, torch.Tensor):
                        output[k] = v.to(device)
                    else:
                        output[k] = v
                return output

            model = model_class(config)
            output = model(**cast_to_device(inputs_dict, "cpu"))

            model.parallelize()

            parallel_output = model(**cast_to_device(inputs_dict, "cuda:0"))

            for value, parallel_value in zip(output, parallel_output):
                if isinstance(value, torch.Tensor):
                    torch.testing.assert_close(value, parallel_value.to("cpu"), rtol=1e-7, atol=1e-7)
                elif isinstance(value, (Tuple, List)):
                    for value_, parallel_value_ in zip(value, parallel_value):
                        torch.testing.assert_close(value_, parallel_value_.to("cpu"), rtol=1e-7, atol=1e-7)

    def check_device_map_is_respected(self, model, device_map):
        for param_name, param in model.named_parameters():
            # Find device in device_map
            while len(param_name) > 0 and param_name not in device_map:
                param_name = ".".join(param_name.split(".")[:-1])
            if param_name not in device_map:
                raise ValueError(f"device map is incomplete, it does not contain any device for `{param_name}`.")
            param_device = device_map[param_name]
            if param_device in ["cpu", "disk"]:
                self.assertEqual(param.device, torch.device("meta"))
            elif param_device in ["mps"]:
                self.assertEqual(param.device, torch.device("mps"))
            else:
                # when loaded with device_map, `param_device` are integer values for cuda/xpu/npu/mlu
                self.assertEqual(param.device, torch.device(f"{torch_device}:{param_device}"))

    @require_accelerate
    @mark.accelerate_tests
    @require_torch_accelerator
    def test_disk_offload_bin(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class._no_split_modules is None:
                continue

            inputs_dict_class = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config).eval()
            model = model.to(torch_device)
            torch.manual_seed(0)
            base_output = model(**inputs_dict_class)

            model_size = compute_module_sizes(model)[""]
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.cpu().save_pretrained(tmp_dir, safe_serialization=False)

                with self.assertRaises(ValueError):
                    max_size = int(self.model_split_percents[0] * model_size)
                    max_memory = {0: max_size, "cpu": max_size}
                    # This errors out
cause it's missing an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} new_model = model_class.from_pretrained( tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_disk_offload_safetensors(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) max_size = int(self.model_split_percents[1] * model_size) max_memory = {0: max_size, "cpu": max_size} # This doesn't error out as it's in safetensors and doesn't need an offload folder new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) @require_accelerate @mark.accelerate_tests @require_torch_accelerator def test_cpu_offload(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. 
max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) @require_accelerate @mark.accelerate_tests @require_torch_multi_accelerator def test_model_parallelism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class._no_split_modules is None: continue inputs_dict_class = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) torch.manual_seed(0) base_output = model(**inputs_dict_class) model_size = compute_module_sizes(model)[""] # We test several splits of sizes to make sure it works. max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] with tempfile.TemporaryDirectory() as tmp_dir: model.cpu().save_pretrained(tmp_dir) for max_size in max_gpu_sizes: max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} new_model = model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) # Making sure part of the model will actually end up offloaded self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) torch.manual_seed(0) new_output = new_model(**inputs_dict_class) if isinstance(base_output[0], tuple) and isinstance(new_output[0], tuple): [ torch.testing.assert_close(a, b, rtol=1e-5, atol=1e-5) for a, b in zip(base_output[0], new_output[0]) ] else: torch.testing.assert_close(base_output[0], new_output[0], rtol=1e-5, atol=1e-5) def test_problem_types(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() problem_types = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if model_class.__name__ not in [ *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), ]: continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"): config.problem_type = problem_type["title"] config.num_labels = problem_type["num_labels"] model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) if problem_type["num_labels"] > 1: inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"]) inputs["labels"] = inputs["labels"].to(problem_type["dtype"]) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. 
This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    def test_load_with_mismatched_shapes(self):
        if not self.test_mismatched_shapes:
            self.skipTest(reason="test_mismatched_shapes is set to False")
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            if model_class.__name__ not in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES):
                continue

            with self.subTest(msg=f"Testing {model_class}"):
                with tempfile.TemporaryDirectory() as tmp_dir:
                    model = model_class(config)
                    model.save_pretrained(tmp_dir)

                    # Fails when we don't set ignore_mismatched_sizes=True
                    with self.assertRaises(RuntimeError):
                        new_model = AutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
                    with self.assertRaises(RuntimeError):
                        new_model_without_prefix = AutoModel.from_pretrained(tmp_dir, vocab_size=10)

                    logger = logging.get_logger("transformers.modeling_utils")

                    with CaptureLogger(logger) as cl:
                        new_model = AutoModelForSequenceClassification.from_pretrained(
                            tmp_dir, num_labels=42, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)
                    new_model.to(torch_device)
                    inputs = self._prepare_for_class(inputs_dict, model_class)
                    logits = new_model(**inputs).logits
                    self.assertEqual(logits.shape[1], 42)

                    with CaptureLogger(logger) as cl:
                        new_model_without_prefix = AutoModel.from_pretrained(
                            tmp_dir, vocab_size=10, ignore_mismatched_sizes=True
                        )
                    self.assertIn("the shapes did not match", cl.out)
                    input_ids = ids_tensor((2, 8), 10)
                    new_model_without_prefix.to(torch_device)
                    if self.is_encoder_decoder:
                        new_model_without_prefix(input_ids, decoder_input_ids=input_ids)
                    else:
                        new_model_without_prefix(input_ids)

    def test_mismatched_shapes_have_properly_initialized_weights(self):
        if not self.test_mismatched_shapes:
            self.skipTest(reason="test_mismatched_shapes is set to False")
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)

        for model_class in self.all_model_classes:
            mappings = [
                MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
                MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
                MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
                MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
            ]
            is_classification_model = any(model_class.__name__ in get_values(mapping) for mapping in mappings)

            if not is_classification_model:
                continue

            # TODO: ydshieh
            is_special_classes = model_class.__name__ in [
                "wav2vec2.masked_spec_embed",
                "Wav2Vec2ForSequenceClassification",
                "CLIPForImageClassification",
                "RegNetForImageClassification",
                "ResNetForImageClassification",
                "UniSpeechSatForSequenceClassification",
                "Wav2Vec2BertForSequenceClassification",
                "PvtV2ForImageClassification",
                "Wav2Vec2ConformerForSequenceClassification",
                "WavLMForSequenceClassification",
                "SwiftFormerForImageClassification",
                "SEWForSequenceClassification",
                "BitForImageClassification",
                "SEWDForSequenceClassification",
                "SiglipForImageClassification",
                "HubertForSequenceClassification",
                "Swinv2ForImageClassification",
                "Data2VecAudioForSequenceClassification",
"UniSpeechForSequenceClassification", "PvtForImageClassification", "ModernBertForSequenceClassification", "ModernBertForTokenClassification", "TimmWrapperForImageClassification", ] special_param_names = [ r"^bit\.", r"^classifier\.weight", r"^classifier\.bias", r"^classifier\..+\.weight", r"^classifier\..+\.bias", r"^data2vec_audio\.", r"^dist_head\.", r"^head\.", r"^hubert\.", r"^pvt\.", r"^pvt_v2\.", r"^regnet\.", r"^resnet\.", r"^sew\.", r"^sew_d\.", r"^swiftformer\.", r"^swinv2\.", r"^transformers\.models\.swiftformer\.", r"^timm_model\.", r"^unispeech\.", r"^unispeech_sat\.", r"^vision_model\.", r"^wav2vec2\.", r"^wav2vec2_bert\.", r"^wav2vec2_conformer\.", r"^wavlm\.", ] with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(configs_no_init) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(RuntimeError): new_model = model_class.from_pretrained(tmp_dir, num_labels=42) logger = logging.get_logger("transformers.modeling_utils") with CaptureLogger(logger) as cl: new_model = model_class.from_pretrained(tmp_dir, num_labels=42, ignore_mismatched_sizes=True) self.assertIn("the shapes did not match", cl.out) for name, param in new_model.named_parameters(): if param.requires_grad: param_mean = ((param.data.mean() * 1e9).round() / 1e9).item() if not ( is_special_classes and any(len(re.findall(target, name)) > 0 for target in special_param_names) ): self.assertIn( param_mean, [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: # Here we allow the parameters' mean to be in the range [-5.0, 5.0] instead of being # either `0.0` or `1.0`, because their initializations are not using # `config.initializer_factor` (or something similar). The purpose of this test is simply # to make sure they are properly initialized (to avoid very large value or even `nan`). self.assertGreaterEqual( param_mean, -5.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) self.assertLessEqual( param_mean, 5.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist(self): # 1. Create a dummy class. Should have buffers as well? To make sure we test __init__ class MyClass(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config=None): super().__init__(config if config is not None else PretrainedConfig()) self.linear = nn.Linear(10, config.num_labels, bias=True) self.embedding = nn.Embedding(10, 10) self.std = 1 def _init_weights(self, module): if isinstance(module, nn.Linear): module.weight.data = nn.init.kaiming_uniform_(module.weight.data, np.sqrt(5)) if module.bias is not None: module.bias.data = module.bias.data.normal_(mean=0.0, std=self.std) # Used to make sure the weights with matched shape are loaded correctly config = PretrainedConfig() config.num_labels = 3 model = MyClass(config=config) # Used to make sure the weights with mismatched shape are properly initialized set_seed(0) config = PretrainedConfig() config.num_labels = 4 # not to init. the weights during the creation: to match the logic in `from_pretrained`, so we can keep the # same sequence of random ops in the execution path to allow us to compare `target_model` and `new_model` below # for `linear` part. 
with ContextManagers([no_init_weights(True)]): target_model = MyClass(config=config) target_model.apply(target_model._initialize_weights) with tempfile.TemporaryDirectory() as tmpdirname: state_dict = model.state_dict() del state_dict["linear.weight"] model.config.save_pretrained(tmpdirname) torch.save(state_dict, os.path.join(tmpdirname, "pytorch_model.bin")) set_seed(0) new_model = MyClass.from_pretrained(tmpdirname, num_labels=4, ignore_mismatched_sizes=True) for key in new_model.state_dict().keys(): # check weight values for weights with matched shapes are identical # (i.e. correctly loaded from the checkpoint) if key not in ["linear.weight", "linear.bias"]: max_diff = torch.max(torch.abs(model.state_dict()[key] - new_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `model` are not identical", ) else: # check we have some mismatched shapes self.assertNotEqual( model.state_dict()[key].shape, new_model.state_dict()[key].shape, msg=f"the weight shapes for {key} in `model` and `new_model` should differ", ) # check the weights with mismatched shape are properly initialized max_diff = torch.max(torch.abs(new_model.state_dict()[key] - target_model.state_dict()[key])) self.assertLessEqual( max_diff.item(), 1e-6, msg=f"the weight values for `{key}` in `new_model` and `target_model` are not identical", ) def test_model_is_small(self): # Just a consistency check to make sure we are not running tests on 80M parameter models. config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) num_params = model.num_parameters() assert ( num_params < 1000000 ), f"{model_class} is too big for the common tests ({num_params})! It should have 1M max." 
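
    # A minimal debugging sketch added for illustration (not part of the upstream mixin; the helper
    # name `_debug_param_counts` is hypothetical): when `test_model_is_small` above fails, printing
    # per-submodule parameter counts is a quick way to find which component blows the 1M budget.
    def _debug_param_counts(self, model):
        # Count the parameters of each top-level child module and print them, largest first.
        counts = {name: sum(p.numel() for p in child.parameters()) for name, child in model.named_children()}
        for name, num in sorted(counts.items(), key=lambda kv: kv[1], reverse=True):
            print(f"{name}: {num} parameters")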
@require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, 1:] = 1 dummy_attention_mask[:, :1] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[1:], logits[1:], atol=4e-2, rtol=4e-2) # check with inference + dropout model.train() _ = model_fa(dummy_input, **other_inputs) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence_right_padding(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = 
model_class.from_pretrained( tmpdirname, torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is not None: dummy_attention_mask = dummy_attention_mask[:1] dummy_attention_mask[:, :-1] = 1 dummy_attention_mask[:, -1:] = 0 if model.config.is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[:1] outputs = model(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) outputs_fa = model_fa(dummy_input, decoder_input_ids=decoder_input_ids, output_hidden_states=True) else: outputs = model(dummy_input, output_hidden_states=True) outputs_fa = model_fa(dummy_input, output_hidden_states=True) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) if model.config.is_encoder_decoder: other_inputs = { "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) else: other_inputs = { "output_hidden_states": True, } if dummy_attention_mask is not None: other_inputs["attention_mask"] = dummy_attention_mask outputs = model(dummy_input, **other_inputs) outputs_fa = model_fa(dummy_input, **other_inputs) logits = ( outputs.hidden_states[-1] if not model.config.is_encoder_decoder else outputs.decoder_hidden_states[-1] ) logits_fa = ( outputs_fa.hidden_states[-1] if not model.config.is_encoder_decoder else outputs_fa.decoder_hidden_states[-1] ) assert torch.allclose(logits_fa[:-1], logits[:-1], atol=4e-2, rtol=4e-2) def test_attn_implementation_composite_models(self): """ Tests if composite models can receive a dict object as attn_implementation, where each key should be one of the sub-configs from the model's config. 
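
        As a minimal sketch of the mapping this exercises (the sub-config names are illustrative and
        model-dependent; they are whatever `config.sub_configs` exposes, e.g. for a VLM):

            config._attn_implementation = {"text_config": "eager", "vision_config": "eager"}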
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") for model_class in self.all_model_classes: if not self._is_composite: self.skipTest("Model is not a composite model.") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # set eager as it will be the one supported in all models # we just need to test if passing 'attn_implementation' as a dict fails or not attn_implementation_per_subconfig = {} for key in config.sub_configs.keys(): attn_implementation_per_subconfig[key] = "eager" config._attn_implementation = attn_implementation_per_subconfig model = model_class(config) for key in config.sub_configs.keys(): sub_config = getattr(model.config, key) self.assertTrue(sub_config._attn_implementation == "eager") for name, submodule in model.named_modules(): class_name = submodule.__class__.__name__ if ( "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name or "FlashAttention" in class_name ): raise ValueError(f"The eager model should not have SDPA/FA2 attention layers but got {class_name}") @require_torch_sdpa def test_sdpa_can_dispatch_non_composite_models(self): """ Tests if non-composite models dispatch correctly on SDPA/eager when requested so when loading the model. This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention". """ if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa or self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError(f"The eager model should not have SDPA attention layers but got {class_name}") @require_torch_sdpa def test_sdpa_can_dispatch_composite_models(self): """ Tests if composite models dispatch correctly on SDPA/eager when requested so when loading the model. This tests only by looking at layer names, as usually SDPA layers are calles "SDPAAttention". In contrast to the above test, this one checks if the "config._attn_implamentation" is a dict after the model is loaded, because we manually replicate requested attn implementation on each sub-config when loading. See https://github.com/huggingface/transformers/pull/32238 for more info The test tries to cover most general cases of composite models, VLMs with vision and text configs. Any model that has a different set of sub-configs has to overwrite this test. 
""" if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) vision_model_names = {"visual", "image_tower", "vision_tower", "vision_model"} language_model_names = {"language_model", "model", "text_model"} vision_model_name = [name for name in vision_model_names if hasattr(model_sdpa, name)][0] language_model_name = [name for name in language_model_names if hasattr(model_sdpa, name)][0] vision_model_sdpa = getattr(model, vision_model_name) language_model_sdpa = getattr(model, language_model_name) text_attn = "sdpa" if language_model_sdpa._supports_sdpa else "eager" vision_attn = "sdpa" if vision_model_sdpa._supports_sdpa else "eager" # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(language_model_sdpa.config._attn_implementation == text_attn) self.assertTrue(vision_model_sdpa.config._attn_implementation == vision_attn) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(getattr(model_eager, language_model_name).config._attn_implementation == "eager") self.assertTrue(getattr(model_eager, vision_model_name).config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") @parameterized.expand([("float16",), ("bfloat16",), ("float32",)]) @require_torch_sdpa def test_eager_matches_sdpa_inference(self, torch_dtype: str): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self.all_model_classes[0]._supports_sdpa: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") if torch_dtype == "float16" and not is_torch_fp16_available_on_device(torch_device): self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)") if torch_dtype == "bfloat16" and not is_torch_bf16_available_on_device(torch_device): self.skipTest( f"bfloat16 not supported on {torch_device} (on the specific device currently used, e.g. Nvidia T4 GPU)" ) # Not sure whether it's fine to put torch.XXX in a decorator if torch is not available so hacking it here instead. 
if torch_dtype == "float16": torch_dtype = torch.float16 elif torch_dtype == "bfloat16": torch_dtype = torch.bfloat16 elif torch_dtype == "float32": torch_dtype = torch.float32 atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 1e-2, ("cuda", True, torch.float16): 5e-3, } rtols = { ("cpu", False, torch.float32): 1e-4, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 1e-2, ("cpu", True, torch.float32): 1e-4, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 1e-2, ("cuda", False, torch.float32): 1e-4, ("cuda", False, torch.bfloat16): 1e-2, ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-4, ("cuda", True, torch.bfloat16): 3e-2, ("cuda", True, torch.float16): 5e-3, } def get_mean_reldiff(failcase, x, ref, atol, rtol): return f"{failcase}: mean relative difference: {((x - ref).abs() / (ref.abs() + 1e-12)).mean():.3e}, torch atol = {atol}, torch rtol = {rtol}" set_model_tester_for_less_flaky_test(self) for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() set_config_for_less_flaky_test(config) model = model_class(config) # FIXME: we deactivate boolean mask for models using "use_mask_token" in their constructors. # These models support masking only in the case `use_mask_token=True`. Otherwise they cannot consume an input mask. # This means that the class needs to be instantiated much later, after `use_mask` is set, which means a significant refactor of the code. # However masking there is not done at any layers that matters (i.e self-attention), therefore we can safely deactivate it. deactivate_mask = "use_mask_token" in inspect.signature(model_class).parameters is_encoder_decoder = model.config.is_encoder_decoder with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) try: model_sdpa = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="sdpa" ) except ValueError: model_sdpa = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype) model_sdpa = model_sdpa.eval().to(torch_device, dtype=torch_dtype) model_eager = model_class.from_pretrained( tmpdirname, torch_dtype=torch_dtype, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device, dtype=torch_dtype) set_model_for_less_flaky_test(model_eager) set_model_for_less_flaky_test(model_sdpa) # We use these for loops instead of parameterized.expand just for the interest of avoiding loading/saving 16 times the model, # but it would be nicer to have an efficient way to use parameterized.expand fail_cases = [] for padding_side in ["left", "right"]: for use_mask in [False, True]: for output_attentions in [True, False]: can_output_attn = "output_attentions" in inspect.signature(model_sdpa.forward).parameters if not (self.has_attentions and can_output_attn) and output_attentions: continue # TODO: if we can also check with `batch_size=1` without being flaky? 
for batch_size in [7]: dummy_input = inputs_dict[model.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: dummy_input = dummy_input.to(torch_dtype) dummy_input = dummy_input[:batch_size] if dummy_input.shape[0] != batch_size: if dummy_input.dtype in [torch.float32, torch.bfloat16, torch.float16]: extension = torch.rand( batch_size - dummy_input.shape[0], *dummy_input.shape[1:], dtype=torch_dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) else: extension = torch.randint( high=5, size=(batch_size - dummy_input.shape[0], *dummy_input.shape[1:]), dtype=dummy_input.dtype, device=torch_device, ) dummy_input = torch.cat((dummy_input, extension), dim=0).to(torch_device) if not use_mask: dummy_attention_mask = None else: dummy_attention_mask = inputs_dict.get("attention_mask", None) if dummy_attention_mask is None: if is_encoder_decoder: seqlen = inputs_dict.get("decoder_input_ids", dummy_input).shape[-1] else: seqlen = dummy_input.shape[-1] dummy_attention_mask = ( torch.ones(batch_size, seqlen).to(torch.int64).to(torch_device) ) dummy_attention_mask = dummy_attention_mask[:batch_size] if dummy_attention_mask.shape[0] != batch_size: extension = torch.ones( batch_size - dummy_attention_mask.shape[0], *dummy_attention_mask.shape[1:], dtype=dummy_attention_mask.dtype, device=torch_device, ) dummy_attention_mask = torch.cat((dummy_attention_mask, extension), dim=0) dummy_attention_mask = dummy_attention_mask.to(torch_device) dummy_attention_mask[:] = 1 if padding_side == "left": dummy_attention_mask[-1, :2] = 0 dummy_attention_mask[-1, 2:] = 1 elif padding_side == "right": dummy_attention_mask[-1, -2:] = 0 dummy_attention_mask[-1, :-2] = 1 for enable_kernels in [False, True]: failcase = f"padding_side={padding_side}, use_mask={use_mask}, enable_kernels={enable_kernels}" if is_encoder_decoder: decoder_input_ids = inputs_dict.get("decoder_input_ids", dummy_input)[ :batch_size ] if decoder_input_ids.shape[0] != batch_size: extension = torch.ones( batch_size - decoder_input_ids.shape[0], *decoder_input_ids.shape[1:], dtype=decoder_input_ids.dtype, device=torch_device, ) decoder_input_ids = torch.cat((decoder_input_ids, extension), dim=0) decoder_input_ids = decoder_input_ids.to(torch_device) # TODO: never an `attention_mask` arg here? processed_inputs = { model.main_input_name: dummy_input, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": dummy_attention_mask, "output_hidden_states": True, } else: processed_inputs = { model.main_input_name: dummy_input, "output_hidden_states": True, } # Otherwise fails for e.g. 
WhisperEncoderModel if "attention_mask" in inspect.signature(model_eager.forward).parameters: processed_inputs["attention_mask"] = dummy_attention_mask if ( self.has_attentions and "output_attentions" in inspect.signature(model_sdpa.forward).parameters ): processed_inputs["output_attentions"] = output_attentions if not deactivate_mask and ( "bool_masked_pos" in inspect.signature(model_eager.forward).parameters ): dummy_mask = torch.ones((self.model_tester.num_masks,)) # In case of additional token (like class) we define a custom `mask_length` if hasattr(self.model_tester, "mask_length"): mask_length = self.model_tester.mask_length - dummy_mask.size(0) else: mask_length = self.model_tester.seq_length - dummy_mask.size(0) dummy_mask = torch.cat([dummy_mask, torch.zeros(mask_length)]) dummy_bool_masked_pos = dummy_mask.expand(batch_size, -1).bool() processed_inputs["bool_masked_pos"] = dummy_bool_masked_pos.to(torch_device) if "noise" in inspect.signature(model_eager.forward).parameters: np.random.seed(2) num_patches = int( (self.model_tester.image_size // self.model_tester.patch_size) ** 2 ) noise = np.random.uniform(size=(batch_size, num_patches)) processed_inputs["noise"] = torch.from_numpy(noise) # TODO: test gradients as well (& for FA2 as well!) with torch.no_grad(): with sdpa_kernel( enable_flash=enable_kernels, enable_math=True, enable_mem_efficient=enable_kernels, ): prepared_inputs = self._prepare_for_class(processed_inputs, model_class) outputs_eager = model_eager(**prepared_inputs) outputs_sdpa = model_sdpa(**prepared_inputs) if hasattr(outputs_eager, "vision_hidden_states"): logits_eager = outputs_eager.vision_hidden_states[-1] logits_sdpa = outputs_sdpa.vision_hidden_states[-1] else: logits_eager = ( outputs_eager.hidden_states[-1] if not is_encoder_decoder else outputs_eager.decoder_hidden_states[-1] ) logits_sdpa = ( outputs_sdpa.hidden_states[-1] if not is_encoder_decoder else outputs_sdpa.decoder_hidden_states[-1] ) if torch_device in ["cpu", "cuda"]: atol = atols[torch_device, enable_kernels, torch_dtype] rtol = rtols[torch_device, enable_kernels, torch_dtype] elif torch_device == "xpu": # As of PyTorch 2.5 XPU backend supports only torch.nn.attention.SDPBackend.MATH # which is implemented on PyTorch level using aten operators and is # device agnostic with respect to implementation of each aten operator. atol = atols["cuda", False, torch_dtype] rtol = rtols["cuda", False, torch_dtype] else: atol = 1e-7 rtol = 1e-4 # Masked tokens output slightly deviates - we don't mind that. 
if use_mask: _logits_sdpa = torch.zeros_like(input=logits_sdpa) _logits_eager = torch.zeros_like(input=logits_eager) _logits_sdpa[:-1] = logits_sdpa[:-1] _logits_eager[:-1] = logits_eager[:-1] if padding_side == "left": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, 2:] _logits_eager[-1:, 2:] = logits_eager[-1:, 2:] elif padding_side == "right": _logits_sdpa[-1:, 2:] = logits_sdpa[-1:, :-2] _logits_eager[-1:, 2:] = logits_eager[-1:, :-2] logits_sdpa = _logits_sdpa logits_eager = _logits_eager results = [ torch.allclose(_logits_sdpa, _logits_eager, atol=atol, rtol=rtol) for (_logits_sdpa, _logits_eager) in zip(logits_sdpa, logits_eager) ] # If 80% batch elements have matched results, it's fine if np.mean(results) < 0.8: fail_cases.append( get_mean_reldiff(failcase, logits_sdpa, logits_eager, atol, rtol) ) self.assertTrue(len(fail_cases) == 0, "\n".join(fail_cases)) @require_torch_sdpa @require_torch_gpu @slow def test_sdpa_can_dispatch_on_flash(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") torch.compiler.reset() compute_capability = torch.cuda.get_device_capability() major, _ = compute_capability if not torch.version.cuda or major < 8: self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0") for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, model_class) if config.model_type in ["llava", "llava_next", "vipllava", "video_llava"]: self.skipTest( reason="Llava-like models currently (transformers==4.39.1) requires an attention_mask input" ) if config.model_type in ["paligemma"]: self.skipTest( "PaliGemma-like models currently (transformers==4.41.0) requires an attention_mask input" ) if config.model_type in ["idefics", "idefics2", "idefics3"]: self.skipTest(reason="Idefics currently (transformers==4.39.1) requires an image_attention_mask input") if config.model_type in ["sam"]: self.skipTest(reason="SAM requires an attention_mask input for relative positional embeddings") model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="sdpa") model.to(torch_device) inputs_dict.pop("attention_mask", None) inputs_dict.pop("decoder_attention_mask", None) for name, inp in inputs_dict.items(): if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]: inputs_dict[name] = inp.to(torch.float16) with sdpa_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): _ = model(**inputs_dict) @require_non_xpu @require_torch_sdpa @require_torch_accelerator @slow def test_sdpa_can_compile_dynamic(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") torch.compiler.reset() if "cuda" in torch_device: compute_capability = torch.cuda.get_device_capability() major, _ = compute_capability if not torch.version.cuda or major < 8: self.skipTest(reason="This test requires an NVIDIA GPU with compute capability >= 8.0") for model_class in self.all_model_classes: if not model_class._supports_sdpa: self.skipTest(f"{model_class.__name__} does not support SDPA") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() inputs_dict = self._prepare_for_class(inputs_dict, 
model_class)

            if config.model_type in ["dbrx"]:
                self.skipTest(
                    "DBRX (transformers==4.40) requires a modification to support dynamic shapes with compile."
                )

            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname, torch_dtype=torch.float16, attn_implementation="sdpa")
                model.to(torch_device)

                # For PyTorch 2.1 - 2.3.0 set `dynamic=True`. In the future setting `dynamic=None` and using `torch._dynamo.mark_dynamic()`
                # on input tensors will be required. `mark_dynamic` currently raises inconsistent shape errors.
                model = torch.compile(model, dynamic=True)

                inputs_dict.pop("attention_mask", None)
                inputs_dict.pop("decoder_attention_mask", None)
                for name, inp in inputs_dict.items():
                    if isinstance(inp, torch.Tensor) and inp.dtype in [torch.float32, torch.float16]:
                        inputs_dict[name] = inp.to(torch.float16)

                # use no_grad to save some memory
                with torch.no_grad():
                    _ = model(**inputs_dict)

    @require_torch_sdpa
    def test_sdpa_matches_eager_sliding_window(self):
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        WINDOW_ATTENTION_MODELS = ["mistral", "mixtral", "qwen2", "qwen_moe", "starcoder2"]

        if len(self.all_generative_model_classes) == 0:
            self.skipTest(f"No generative model classes for {self.__class__.__name__}")

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_sdpa:
                self.skipTest(f"{model_class.__name__} does not support SDPA")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            if config.model_type not in WINDOW_ATTENTION_MODELS:
                self.skipTest(f"{config.model_type} does not use window attention")

            config.sliding_window = 2

            dummy_input = inputs_dict[model_class.main_input_name]
            attention_mask = inputs_dict["attention_mask"]

            self.assertTrue(dummy_input.ndim == 2)
            self.assertTrue(dummy_input.shape[1] > 6)

            with tempfile.TemporaryDirectory() as tmpdir:
                with torch.device(torch_device):
                    model_eager = AutoModelForCausalLM.from_config(
                        config, attn_implementation="eager", torch_dtype=torch.float32
                    )

                model_eager.save_pretrained(tmpdir)

                with torch.device(torch_device):
                    model_sdpa = AutoModelForCausalLM.from_pretrained(
                        tmpdir, attn_implementation="sdpa", torch_dtype=torch.float32
                    )

                model_eager = model_eager.eval()
                model_sdpa = model_sdpa.eval()

                with torch.no_grad():
                    with sdpa_kernel(enable_flash=False, enable_math=True, enable_mem_efficient=False):
                        res_eager = model_eager(**inputs_dict, return_dict=False)[0]
                        res_sdpa = model_sdpa(**inputs_dict, return_dict=False)[0]

                # Only non-padding tokens are expected to match.
                self.assertTrue(
                    torch.allclose(res_eager[attention_mask == 1], res_sdpa[attention_mask == 1], rtol=1e-4, atol=1e-4)
                )

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    def test_flash_attn_2_can_dispatch_composite_models(self):
        """
        Tests if composite models can dispatch on FA2 if the sub-models support FA2.
        The test is needed as we handle composite models differently from regular ones and cannot check them with
        the tests above. If any of the sub-models does not support FA2, we'll raise an error when dispatching that
        particular sub-model.
Otherwise we dispatch safely in all sub-models, where "sub-models" are specific backbone models
        (LM/vision/audio/etc)
        """
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        if not is_torch_fp16_available_on_device(torch_device):
            self.skipTest(f"float16 not supported on {torch_device} (on the specific device currently used)")

        torch_dtype = torch.float16
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            if not self._is_composite:
                self.skipTest("This model is not a composite model!")

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype)

                supports_fa2_all_modules = all(
                    module._supports_flash_attn_2
                    for name, module in model.named_modules()
                    if isinstance(module, PreTrainedModel) and name != ""
                )
                if not supports_fa2_all_modules:
                    with self.assertRaises(ValueError):
                        model_fa2 = model_class.from_pretrained(
                            tmpdirname, torch_dtype=torch_dtype, attn_implementation="flash_attention_2"
                        )
                else:
                    model_fa2 = model_class.from_pretrained(
                        tmpdirname, torch_dtype=torch_dtype, attn_implementation="flash_attention_2"
                    )
                    for key in model_fa2.config:
                        if isinstance(getattr(model_fa2.config, key), PretrainedConfig):
                            sub_config = getattr(model_fa2.config, key)
                            self.assertTrue(sub_config._attn_implementation == "flash_attention_2")

                    has_fa2 = False
                    for name, submodule in model_fa2.named_modules():
                        class_name = submodule.__class__.__name__
                        if "FlashAttention" in class_name:
                            has_fa2 = True
                            break
                    if not has_fa2:
                        raise ValueError("The FA2 model should have FA2 layers")

    @require_flash_attn
    @require_torch_gpu
    @require_bitsandbytes
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_fp32_ln(self):
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)

                dummy_input = inputs_dict[model.main_input_name]
                dummy_attention_mask = inputs_dict.get("attention_mask", torch.ones_like(dummy_input))
                batch_size = dummy_attention_mask.shape[0]

                is_padding_right = dummy_attention_mask[:, -1].sum().item() != batch_size

                # To avoid errors with padding_side == "right"
                if is_padding_right:
                    dummy_attention_mask = torch.ones_like(dummy_input)

                model = model_class.from_pretrained(
                    tmpdirname,
                    torch_dtype=torch.float16,
                    attn_implementation="flash_attention_2",
                    low_cpu_mem_usage=True,
                    load_in_4bit=True,
                )

                for _, param in model.named_parameters():
                    # upcast only layer norms (after 4-bit loading, these are the only fp16/bf16 params left)
                    if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
                        param.data = param.data.to(torch.float32)

                if model.config.is_encoder_decoder:
                    dummy_decoder_input_ids = inputs_dict["decoder_input_ids"]
                    dummy_decoder_attention_mask = inputs_dict["decoder_attention_mask"]

                    _ = model(dummy_input, decoder_input_ids=dummy_decoder_input_ids)
                    # with attention mask
                    _ = model(
                        dummy_input,
                        attention_mask=dummy_attention_mask,
                        decoder_input_ids=dummy_decoder_input_ids,
                        decoder_attention_mask=dummy_decoder_attention_mask,
                    )
                else:
                    _ = model(dummy_input)
                    # with attention mask
                    _ = model(dummy_input, attention_mask=dummy_attention_mask)
@require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self): if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") max_new_tokens = 30 for model_class in self.all_generative_model_classes: if not model_class._supports_flash_attn_2: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict: self.skipTest("Model dummy inputs should contain padding in their attention mask") dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.bfloat16]: dummy_input = dummy_input.to(torch.float16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # ensure left padding, to adapt for some models if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) dummy_attention_mask = inputs_dict["attention_mask"] inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id model = ( model_class.from_pretrained( tmpdirname, torch_dtype=torch.float16, attn_implementation="flash_attention_2", low_cpu_mem_usage=True, ) .to(torch_device) .eval() ) # flatten padfree_inputs_dict = { k: v[dummy_attention_mask.bool()].unsqueeze(0) for k, v in inputs_dict.items() if not k == "attention_mask" } # add position_ids padfree_inputs_dict["position_ids"] = ( torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()]) .long() .unsqueeze(0) .to(torch_device) ) res_padded = model(**inputs_dict) res_padfree = model(**padfree_inputs_dict) logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()] logits_padfree = res_padfree.logits[0] torch.testing.assert_close(logits_padded.argmax(-1), logits_padfree.argmax(-1), rtol=0, atol=0) # acceptable numerical instability tol = torch.finfo(torch.float16).eps torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol) @is_pt_tf_cross_test def test_tf_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() tf_model_class_name = "TF" + model_class.__name__ # Add the "TF" at the beginning if not hasattr(transformers, tf_model_class_name): self.skipTest(reason="transformers does not have this model in TF version yet") tf_model_class = getattr(transformers, tf_model_class_name) pt_model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname, safe_serialization=True) tf_model_1 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) pt_model.save_pretrained(tmpdirname, safe_serialization=False) tf_model_2 = tf_model_class.from_pretrained(tmpdirname, from_pt=True) # Check models are equal for p1, p2 in zip(tf_model_1.weights, tf_model_2.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) @is_pt_flax_cross_test def test_flax_from_pt_safetensors(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() flax_model_class_name = "Flax" + 
model_class.__name__  # Add "Flax" at the beginning
            if not hasattr(transformers, flax_model_class_name):
                self.skipTest(reason="transformers does not have this model in Flax version yet")

            flax_model_class = getattr(transformers, flax_model_class_name)

            pt_model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_model.save_pretrained(tmpdirname, safe_serialization=True)
                flax_model_1 = flax_model_class.from_pretrained(tmpdirname, from_pt=True)

                pt_model.save_pretrained(tmpdirname, safe_serialization=False)
                flax_model_2 = flax_model_class.from_pretrained(tmpdirname, from_pt=True)

                # Check models are equal
                self.assertTrue(check_models_equal(flax_model_1, flax_model_2))

    @require_flash_attn
    @require_torch_gpu
    @mark.flash_attn_test
    @slow
    def test_flash_attn_2_from_config(self):
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_flash_attn_2:
                self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")

            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            # TODO: to change it in the future with other relevant auto classes
            fa2_model = model_class._from_config(
                config, attn_implementation="flash_attention_2", torch_dtype=torch.bfloat16
            ).to(torch_device)

            dummy_input = torch.LongTensor([[0, 2, 3, 4], [0, 2, 3, 4]]).to(torch_device)
            dummy_attention_mask = torch.LongTensor([[1, 1, 1, 1], [0, 1, 1, 1]]).to(torch_device)

            _ = fa2_model(input_ids=dummy_input, attention_mask=dummy_attention_mask)

            with tempfile.TemporaryDirectory() as tmpdirname:
                fa2_model.save_pretrained(tmpdirname)

                model_from_pretrained = model_class.from_pretrained(tmpdirname)

                self.assertTrue(model_from_pretrained.config._attn_implementation != "flash_attention_2")

    def _get_custom_4d_mask_test_data(self):
        # Sequence in which all but the last token is the same
        input_ids = torch.tensor(
            [[10, 11, 12, 13], [10, 11, 12, 14], [10, 11, 12, 15]], device=torch_device, dtype=torch.int64
        )
        position_ids = torch.tensor([[0, 1, 2, 3]] * 3, device=torch_device, dtype=torch.int64)

        # Combining common prefix with the unique ending tokens:
        input_ids_shared_prefix = torch.cat([input_ids[0][:-1], input_ids[:, -1]]).unsqueeze(0)

        # Creating a 4D mask where the last 3 tokens do not attend to each other.
        mask_shared_prefix = torch.tensor(
            [
                [
                    [
                        [1, 0, 0, 0, 0, 0],
                        [1, 1, 0, 0, 0, 0],
                        [1, 1, 1, 0, 0, 0],
                        [1, 1, 1, 1, 0, 0],
                        [1, 1, 1, 0, 1, 0],
                        [1, 1, 1, 0, 0, 1],
                    ]
                ]
            ],
        )

        # inverting the attention mask
        mask_dtype = torch.float32
        min_dtype = torch.finfo(mask_dtype).min
        mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=mask_dtype, device=torch_device) * min_dtype

        # Creating a position_ids tensor. Note the repeating figures at the end.
        position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64)

        return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix

    def test_custom_4d_attention_mask(self):
        if not self.has_attentions:
            self.skipTest(reason="Model architecture does not support attentions")

        if len(self.all_generative_model_classes) == 0:
            self.skipTest(
                reason="Model architecture has no generative classes, and thus not necessarily supporting 4D masks"
            )

        set_model_tester_for_less_flaky_test(self)

        for model_class in self.all_generative_model_classes:
            if not model_class._supports_static_cache:
                self.skipTest(f"{model_class.__name__} is not guaranteed to work with custom 4D attention masks")
            config, _ = self.model_tester.prepare_config_and_inputs_for_common()
            set_config_for_less_flaky_test(config)
            if getattr(config, "sliding_window", 0) is not None and getattr(config, "sliding_window", 0) > 0:
                self.skipTest(f"{model_class.__name__} with sliding window attention is not supported by this test")
            model = model_class(config).to(device=torch_device, dtype=torch.float32)
            set_model_for_less_flaky_test(model)

            (
                input_ids,
                position_ids,
                input_ids_shared_prefix,
                mask_shared_prefix,
                position_ids_shared_prefix,
            ) = self._get_custom_4d_mask_test_data()

            logits = model.forward(input_ids, position_ids=position_ids).logits
            # logits.shape == torch.Size([3, 4, ...])

            logits_shared_prefix = model(
                input_ids_shared_prefix,
                attention_mask=mask_shared_prefix,
                position_ids=position_ids_shared_prefix,
            )[0]
            # logits_shared_prefix.shape == torch.Size([1, 6, ...])

            out_last_tokens = logits[:, -1, :]  # last tokens in each batch line
            out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :]  # last three tokens

            # comparing softmax-normalized logits:
            normalized_0 = F.softmax(out_last_tokens, dim=-1)
            normalized_1 = F.softmax(out_shared_prefix_last_tokens, dim=-1)
            torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4)

    @slow
    @require_torch_accelerator
    def test_torch_compile_for_training(self):
        if version.parse(torch.__version__) < version.parse("2.3"):
            self.skipTest(reason="This test requires torch >= 2.3 to run.")

        if not hasattr(self, "_torch_compile_train_cls"):
            self.skipTest(f"{self.__class__.__name__} doesn't have the attribute `_torch_compile_train_cls`.")

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        cls = self._torch_compile_train_cls
        model = cls(config).to(torch_device)

        inputs = {
            "input_ids": torch.randint(low=1, high=model.config.vocab_size, size=(2, 10), device=torch_device),
            "attention_mask": torch.tensor(
                [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]],
                dtype=torch.int64,
                device=torch_device,
            ),
            "position_ids": torch.arange(0, 10, device=torch_device).unsqueeze(0),
            "labels": torch.randint(low=1, high=model.config.vocab_size, size=(2, 10), device=torch_device),
        }

        # eager backward
        set_seed(42)
        loss = model(**inputs).loss
        loss.backward()

        params = {name: param.grad.clone().detach().cpu() for name, param in model.named_parameters()}
        model.zero_grad()
        del loss

        model = torch.compile(model, fullgraph=True, mode="reduce-overhead")
        # forward compilation
        set_seed(42)
        loss = model(**inputs).loss
        # backward compilation
        loss.backward()
        # check grad matches
        for name, param in model._orig_mod.named_parameters():
            torch.testing.assert_close(param.grad.detach().cpu(), params[name], rtol=1e-4, atol=1e-4)

    def test_forward_with_logits_to_keep(self):
        for model_class in self.all_generative_model_classes:
            if "logits_to_keep" not in set(inspect.signature(model_class.forward).parameters.keys()):
                self.skipTest(reason="This model does not support `logits_to_keep` argument.")

            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            batch_size, sequence_length = inputs["input_ids"].shape
            vocab_size = config.get_text_config().vocab_size
            model = model_class(config).to(device=torch_device).eval()

            # some models have labels but `logits_to_keep` should not be used in train mode
            _ = inputs.pop("labels", None)

            # logits_to_keep=0 is a special case meaning "keep all logits"
            all_logits = model(**inputs, logits_to_keep=0).logits
            last_token_logits = model(**inputs, logits_to_keep=1).logits

            # Assert all shapes are correct
            self.assertEqual(tuple(all_logits.shape), (batch_size, sequence_length, vocab_size))
            self.assertEqual(tuple(last_token_logits.shape), (batch_size, 1, vocab_size))

            # Assert the last tokens are actually the same (except for the natural fluctuation due to order of FP ops)
            torch.testing.assert_close(all_logits[:, -1:, :], last_token_logits, rtol=1e-5, atol=1e-5)

    @require_torch_gpu
    def test_flex_attention_with_grads(self):
        for model_class in self.all_model_classes:
            if not model_class._supports_flex_attn:
                self.skipTest(reason="This model does not support flex attention")
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config._attn_implementation = "flex_attention"
            model = model_class(config).to(device=torch_device, dtype=torch.float16)
            self.assertTrue(model.config._attn_implementation == "flex_attention")

            # If this does not raise an error, the test passes (see https://github.com/huggingface/transformers/pull/35605)
            _ = model(inputs_dict["input_ids"].to(torch_device))


global_rng = random.Random()


def ids_tensor(shape, vocab_size, rng=None, name=None):
    # Creates a random int64 ("long") tensor of the given shape, with values in [0, vocab_size - 1]
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    return torch.tensor(data=values, dtype=torch.long, device=torch_device).view(shape).contiguous()


def random_attention_mask(shape, rng=None, name=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=None, name=None)
    # make sure that at least one token is attended to for each batch
    # we choose the 1st token so this property of `at least one being non-zero` still holds after applying causal mask
    attn_mask[:, 0] = 1
    return attn_mask


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float, device=torch_device).view(shape).contiguous()
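# Illustrative usage sketch (an assumption, not part of the original test suite): how the random-tensor
# helpers above are typically combined to fabricate a dummy batch. The shapes and vocab size below are
# arbitrary placeholders, not values taken from any real model tester.
def _example_dummy_batch():
    input_ids = ids_tensor((2, 7), vocab_size=99)  # random int64 token ids in [0, 98]
    attention_mask = random_attention_mask((2, 7))  # 0/1 mask; the first token of each row is always attended
    pixel_values = floats_tensor((2, 3, 14, 14), scale=1.0)  # random float32 "image" features
    return {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values}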
transformers/tests/test_modeling_common.py/0
{ "file_path": "transformers/tests/test_modeling_common.py", "repo_id": "transformers", "token_count": 118106 }
216
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Dict

from transformers import is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_accelerate,
    require_fp8,
    require_fsdp,
    require_torch_multi_gpu,
)


if is_torch_available():
    import torch
    import torch.distributed
    import torch.utils.data

    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        DataCollatorForSeq2Seq,
        EvalPrediction,
        GenerationConfig,
        HfArgumentParser,
        PreTrainedTokenizerBase,
        Seq2SeqTrainer,
        Seq2SeqTrainingArguments,
    )

    class DummyTextDataset(torch.utils.data.Dataset):
        # Each item is a dict of tensors (not a string), hence the return annotation below.
        def __init__(self, tokenizer: PreTrainedTokenizerBase) -> None:
            data = 4 * [
                "Hello world!",
                "The quick brown fox jumps over the lazy dog.",
            ]
            self.data = [
                {k: v.squeeze(0) for k, v in tokenizer(item, return_tensors="pt", return_attention_mask=True).items()}
                for item in data
            ]
            for item in self.data:
                item["labels"] = item["input_ids"]

        def __len__(self) -> int:
            return len(self.data)

        def __getitem__(self, i: int) -> Dict[str, "torch.Tensor"]:
            return self.data[i]


class TestFSDPTrainer(TestCasePlus):
    @require_accelerate
    @require_torch_multi_gpu
    @require_fsdp
    def test_trainer(self):
        output_dir = self.get_auto_remove_tmp_dir()
        cmd = [
            "accelerate",
            "launch",
            "--use_fsdp",
            "--main_process_port",
            f"{get_torch_dist_unique_port()}",
            "--num_processes",
            f"{torch.cuda.device_count()}",
            "--fsdp_transformer_layer_cls_to_wrap",
            "GPT2Block",
            f"{self.test_file_dir}/test_trainer_fsdp.py",
            "--output_dir",
            f"{output_dir}",
            "--report_to",
            "none",
        ]
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestFSDPTrainerFP8(TestCasePlus):
    @require_accelerate
    @require_torch_multi_gpu
    @require_fsdp
    @require_fp8
    def test_trainer(self):
        output_dir = self.get_auto_remove_tmp_dir()
        cmd = [
            "accelerate",
            "launch",
            "--use_fsdp",
            "--main_process_port",
            f"{get_torch_dist_unique_port()}",
            "--num_processes",
            f"{torch.cuda.device_count()}",
            "--mixed_precision",
            "fp8",
            "--fsdp_transformer_layer_cls_to_wrap",
            "GPT2Block",
            f"{self.test_file_dir}/test_trainer_fsdp.py",
            "--output_dir",
            f"{output_dir}",
            "--report_to",
            "none",
        ]
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestFSDPTrainerWrap(TestCasePlus):
    @require_accelerate
    @require_torch_multi_gpu
    @require_fsdp
    def test_trainer(self):
        output_dir = self.get_auto_remove_tmp_dir()
        cmd = [
            "accelerate",
            "launch",
            "--use_fsdp",
            "--main_process_port",
            f"{get_torch_dist_unique_port()}",
            "--num_processes",
            f"{torch.cuda.device_count()}",
            "--fsdp_transformer_layer_cls_to_wrap",
            "GPT2Block",
            f"{self.test_file_dir}/test_trainer_fsdp.py",
            "--output_dir",
            f"{output_dir}",
            "--report_to",
            "none",
            "--auto_find_batch_size",
            "True",
        ]
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    parser = HfArgumentParser((Seq2SeqTrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
    training_args.per_device_eval_batch_size = 1
    training_args.use_legacy_prediction_loop = False
    training_args.predict_with_generate = True
    training_args.generation_config = GenerationConfig(max_length=30)

    pretrained_model_name = "hf-internal-testing/tiny-random-gpt2"
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
    tokenizer.pad_token = tokenizer.eos_token
    device = torch.device(torch.distributed.get_rank())
    model = AutoModelForCausalLM.from_pretrained(pretrained_model_name).to(device)

    def compute_metrics(p: EvalPrediction) -> Dict[str, float]:
        # `.mean()` over a boolean comparison yields a float accuracy, hence `float` (not `bool`) as value type
        return {"accuracy": (p.predictions == p.label_ids).mean()}

    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_collator=DataCollatorForSeq2Seq(tokenizer, model),
        eval_dataset=DummyTextDataset(tokenizer),
        compute_metrics=compute_metrics,
    )
    metrics = trainer.evaluate()
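# Illustrative sketch (an assumption, not part of the original file): a hypothetical helper mirroring the
# `accelerate launch` command the TestFSDPTrainer classes above assemble; all arguments are placeholders.
def _example_fsdp_launch_cmd(script_path: str, output_dir: str, num_processes: int, port: int) -> list:
    return [
        "accelerate", "launch",
        "--use_fsdp",
        "--main_process_port", str(port),
        "--num_processes", str(num_processes),
        "--fsdp_transformer_layer_cls_to_wrap", "GPT2Block",
        script_path,
        "--output_dir", output_dir,
        "--report_to", "none",
    ]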
transformers/tests/trainer/test_trainer_fsdp.py/0
{ "file_path": "transformers/tests/trainer/test_trainer_fsdp.py", "repo_id": "transformers", "token_count": 2775 }
217
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import unittest
from unittest.mock import patch

from transformers.testing_utils import CaptureStd, require_torch


class CLITest(unittest.TestCase):
    @patch("sys.argv", ["fakeprogrampath", "env"])
    def test_cli_env(self):
        # test transformers-cli env
        import transformers.commands.transformers_cli

        with CaptureStd() as cs:
            transformers.commands.transformers_cli.main()

        self.assertIn("Python version", cs.out)
        self.assertIn("Platform", cs.out)
        self.assertIn("Using distributed or parallel set-up in script?", cs.out)

    @require_torch
    @patch("sys.argv", ["fakeprogrampath", "download", "hf-internal-testing/tiny-random-gptj", "--cache-dir", "/tmp"])
    def test_cli_download(self):
        import transformers.commands.transformers_cli

        # remove any previously downloaded model to start clean
        shutil.rmtree("/tmp/models--hf-internal-testing--tiny-random-gptj", ignore_errors=True)

        # run the command
        transformers.commands.transformers_cli.main()

        # check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--tiny-random-gptj
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/blobs"))
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/refs"))
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--tiny-random-gptj/snapshots"))

    @require_torch
    @patch(
        "sys.argv",
        [
            "fakeprogrampath",
            "download",
            "hf-internal-testing/test_dynamic_model_with_tokenizer",
            "--trust-remote-code",
            "--cache-dir",
            "/tmp",
        ],
    )
    def test_cli_download_trust_remote(self):
        import transformers.commands.transformers_cli

        # remove any previously downloaded model to start clean
        shutil.rmtree("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer", ignore_errors=True)

        # run the command
        transformers.commands.transformers_cli.main()

        # check if the model files are downloaded correctly on /tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/blobs"))
        self.assertTrue(os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/refs"))
        self.assertTrue(
            os.path.exists("/tmp/models--hf-internal-testing--test_dynamic_model_with_tokenizer/snapshots")
        )
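# Illustrative sketch (an assumption, not part of the original tests): the shell invocations that the
# patched `sys.argv` values above simulate, shown for readability:
#
#   transformers-cli env
#   transformers-cli download hf-internal-testing/tiny-random-gptj --cache-dir /tmp
#   transformers-cli download hf-internal-testing/test_dynamic_model_with_tokenizer --trust-remote-code --cache-dir /tmp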
transformers/tests/utils/test_cli.py/0
{ "file_path": "transformers/tests/utils/test_cli.py", "repo_id": "transformers", "token_count": 1250 }
218
# coding=utf-8 # Copyright 2020 The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import io import unittest from dataclasses import dataclass from typing import Optional from transformers import AlbertForMaskedLM from transformers.testing_utils import require_torch from transformers.utils import ModelOutput, is_torch_available if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_2 @dataclass class ModelOutputTest(ModelOutput): a: float b: Optional[float] = None c: Optional[float] = None class ModelOutputTester(unittest.TestCase): def test_get_attributes(self): x = ModelOutputTest(a=30) self.assertEqual(x.a, 30) self.assertIsNone(x.b) self.assertIsNone(x.c) with self.assertRaises(AttributeError): _ = x.d def test_index_with_ints_and_slices(self): x = ModelOutputTest(a=30, b=10) self.assertEqual(x[0], 30) self.assertEqual(x[1], 10) self.assertEqual(x[:2], (30, 10)) self.assertEqual(x[:], (30, 10)) x = ModelOutputTest(a=30, c=10) self.assertEqual(x[0], 30) self.assertEqual(x[1], 10) self.assertEqual(x[:2], (30, 10)) self.assertEqual(x[:], (30, 10)) def test_index_with_strings(self): x = ModelOutputTest(a=30, b=10) self.assertEqual(x["a"], 30) self.assertEqual(x["b"], 10) with self.assertRaises(KeyError): _ = x["c"] x = ModelOutputTest(a=30, c=10) self.assertEqual(x["a"], 30) self.assertEqual(x["c"], 10) with self.assertRaises(KeyError): _ = x["b"] def test_dict_like_properties(self): x = ModelOutputTest(a=30) self.assertEqual(list(x.keys()), ["a"]) self.assertEqual(list(x.values()), [30]) self.assertEqual(list(x.items()), [("a", 30)]) self.assertEqual(list(x), ["a"]) x = ModelOutputTest(a=30, b=10) self.assertEqual(list(x.keys()), ["a", "b"]) self.assertEqual(list(x.values()), [30, 10]) self.assertEqual(list(x.items()), [("a", 30), ("b", 10)]) self.assertEqual(list(x), ["a", "b"]) x = ModelOutputTest(a=30, c=10) self.assertEqual(list(x.keys()), ["a", "c"]) self.assertEqual(list(x.values()), [30, 10]) self.assertEqual(list(x.items()), [("a", 30), ("c", 10)]) self.assertEqual(list(x), ["a", "c"]) with self.assertRaises(Exception): x = x.update({"d": 20}) with self.assertRaises(Exception): del x["a"] with self.assertRaises(Exception): _ = x.pop("a") with self.assertRaises(Exception): _ = x.setdefault("d", 32) def test_set_attributes(self): x = ModelOutputTest(a=30) x.a = 10 self.assertEqual(x.a, 10) self.assertEqual(x["a"], 10) def test_set_keys(self): x = ModelOutputTest(a=30) x["a"] = 10 self.assertEqual(x.a, 10) self.assertEqual(x["a"], 10) def test_instantiate_from_dict(self): x = ModelOutputTest({"a": 30, "b": 10}) self.assertEqual(list(x.keys()), ["a", "b"]) self.assertEqual(x.a, 30) self.assertEqual(x.b, 10) def test_instantiate_from_iterator(self): x = ModelOutputTest([("a", 30), ("b", 10)]) self.assertEqual(list(x.keys()), ["a", "b"]) self.assertEqual(x.a, 30) self.assertEqual(x.b, 10) with self.assertRaises(ValueError): _ = ModelOutputTest([("a", 30), (10, 10)]) x = ModelOutputTest(a=(30, 30)) 
self.assertEqual(list(x.keys()), ["a"]) self.assertEqual(x.a, (30, 30)) @require_torch def test_torch_pytree(self): # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves) # this is important for DistributedDataParallel gradient synchronization with static_graph=True import torch.utils._pytree as pytree x = ModelOutput({"a": 1.0, "c": 2.0}) self.assertFalse(pytree._is_leaf(x)) x = ModelOutputTest(a=1.0, c=2.0) self.assertFalse(pytree._is_leaf(x)) expected_flat_outs = [1.0, 2.0] expected_tree_spec = pytree.TreeSpec(ModelOutputTest, ["a", "c"], [pytree.LeafSpec(), pytree.LeafSpec()]) actual_flat_outs, actual_tree_spec = pytree.tree_flatten(x) self.assertEqual(expected_flat_outs, actual_flat_outs) self.assertEqual(expected_tree_spec, actual_tree_spec) unflattened_x = pytree.tree_unflatten(actual_flat_outs, actual_tree_spec) self.assertEqual(x, unflattened_x) if is_torch_greater_or_equal_than_2_2: self.assertEqual( pytree.treespec_dumps(actual_tree_spec), '[1, {"type": "tests.utils.test_model_output.ModelOutputTest", "context": "[\\"a\\", \\"c\\"]", "children_spec": [{"type": null, "context": null, "children_spec": []}, {"type": null, "context": null, "children_spec": []}]}]', ) # TODO: @ydshieh @unittest.skip(reason="CPU OOM") @require_torch def test_export_serialization(self): if not is_torch_greater_or_equal_than_2_2: self.skipTest(reason="Export serialization requires torch >= 2.2.0") model_cls = AlbertForMaskedLM model_config = model_cls.config_class() model = model_cls(model_config) input_dict = {"input_ids": torch.randint(0, 30000, (1, 512), dtype=torch.int64, requires_grad=False)} ep = torch.export.export(model, (), input_dict) buffer = io.BytesIO() torch.export.save(ep, buffer) buffer.seek(0) loaded_ep = torch.export.load(buffer) input_dict = {"input_ids": torch.randint(0, 30000, (1, 512), dtype=torch.int64, requires_grad=False)} assert torch.allclose(model(**input_dict).logits, loaded_ep(**input_dict).logits) class ModelOutputTestNoDataclass(ModelOutput): """Invalid test subclass of ModelOutput where @dataclass decorator is not used""" a: float b: Optional[float] = None c: Optional[float] = None class ModelOutputSubclassTester(unittest.TestCase): def test_direct_model_output(self): # Check that direct usage of ModelOutput instantiates without errors ModelOutput({"a": 1.1}) def test_subclass_no_dataclass(self): # Check that a subclass of ModelOutput without @dataclass is invalid # A valid subclass is inherently tested other unit tests above. with self.assertRaises(TypeError): ModelOutputTestNoDataclass(a=1.1, b=2.2, c=3.3)
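# Illustrative usage sketch (an assumption, not used by the tests above): ModelOutput subclasses behave both
# as ordered mappings (fields left as None are skipped) and as attribute containers.
def _example_model_output_access():
    out = ModelOutputTest(a=1.0, c=2.0)
    assert out.a == out["a"] == 1.0  # attribute access and key access agree
    assert list(out.keys()) == ["a", "c"]  # `b` was left as None, so it is skipped
    return out.to_tuple()  # -> (1.0, 2.0); tuple conversion keeps only the fields that were set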
transformers/tests/utils/test_model_output.py/0
{ "file_path": "transformers/tests/utils/test_model_output.py", "repo_id": "transformers", "token_count": 3188 }
219
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"


# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "TimmWrapperConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
    "GraniteConfig",
    "GraniteMoeConfig",
}


def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('google-bert/bert-base-uncased', 'https://huggingface.co/google-bert/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(
            f"The following configurations don't contain any valid checkpoint:\n{message}\n\n"
            "The requirement is to include a link pointing to one of the models of this architecture in the "
            "docstring of the config classes listed above. The link should be in markdown format, like "
            "[myorg/mymodel](https://huggingface.co/myorg/mymodel)."
        )


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
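# Illustrative sketch (an assumption, for documentation only): what `_re_checkpoint` extracts from a typical
# config docstring, mirroring the (name, link) tuple format described above.
def _example_checkpoint_regex():
    doc = "[google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased)"
    return _re_checkpoint.findall(doc)
    # -> [("google-bert/bert-base-uncased", "https://huggingface.co/google-bert/bert-base-uncased")]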
transformers/utils/check_config_docstrings.py/0
{ "file_path": "transformers/utils/check_config_docstrings.py", "repo_id": "transformers", "token_count": 1326 }
220
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that sorts the imports in the custom inits of Transformers. Transformers uses init files that delay the import of an object to when it's actually needed. This is to avoid the main init importing all models, which would make the line `import transformers` very slow when the user has all optional dependencies installed. The inits with delayed imports have two halves: one definining a dictionary `_import_structure` which maps modules to the name of the objects in each module, and one in `TYPE_CHECKING` which looks like a normal init for type-checkers. `isort` or `ruff` properly sort the second half which looks like traditionl imports, the goal of this script is to sort the first half. Use from the root of the repo with: ```bash python utils/custom_init_isort.py ``` which will auto-sort the imports (used in `make style`). For a check only (as used in `make quality`) run: ```bash python utils/custom_init_isort.py --check_only ``` """ import argparse import os import re from typing import Any, Callable, List, Optional # Path is defined with the intent you should run this script from the root of the repo. PATH_TO_TRANSFORMERS = "src/transformers" # Pattern that looks at the indentation in a line. _re_indent = re.compile(r"^(\s*)\S") # Pattern that matches `"key":" and puts `key` in group 0. _re_direct_key = re.compile(r'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. _re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. _re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. _re_bracket_content = re.compile(r"\[([^\]]+)\]") def get_indent(line: str) -> str: """Returns the indent in given line (as string).""" search = _re_indent.search(line) return "" if search is None else search.groups()[0] def split_code_in_indented_blocks( code: str, indent_level: str = "", start_prompt: Optional[str] = None, end_prompt: Optional[str] = None ) -> List[str]: """ Split some code into its indented blocks, starting at a given level. Args: code (`str`): The code to split. indent_level (`str`): The indent level (as string) to use for identifying the blocks to split. start_prompt (`str`, *optional*): If provided, only starts splitting at the line where this text is. end_prompt (`str`, *optional*): If provided, stops splitting at a line where this text is. Warning: The text before `start_prompt` or after `end_prompt` (if provided) is not ignored, just not split. The input `code` can thus be retrieved by joining the result. Returns: `List[str]`: The list of blocks. """ # Let's split the code into lines and move to start_index. 
index = 0 lines = code.split("\n") if start_prompt is not None: while not lines[index].startswith(start_prompt): index += 1 blocks = ["\n".join(lines[:index])] else: blocks = [] # This variable contains the block treated at a given time. current_block = [lines[index]] index += 1 # We split into blocks until we get to the `end_prompt` (or the end of the file). while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)): # We have a non-empty line with the proper indent -> start of a new block if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level: # Store the current block in the result and rest. There are two cases: the line is part of the block (like # a closing parenthesis) or not. if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "): # Line is part of the current block current_block.append(lines[index]) blocks.append("\n".join(current_block)) if index < len(lines) - 1: current_block = [lines[index + 1]] index += 1 else: current_block = [] else: # Line is not part of the current block blocks.append("\n".join(current_block)) current_block = [lines[index]] else: # Just add the line to the current block current_block.append(lines[index]) index += 1 # Adds current block if it's nonempty. if len(current_block) > 0: blocks.append("\n".join(current_block)) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(lines): blocks.append("\n".join(lines[index:])) return blocks def ignore_underscore_and_lowercase(key: Callable[[Any], str]) -> Callable[[Any], str]: """ Wraps a key function (as used in a sort) to lowercase and ignore underscores. """ def _inner(x): return key(x).lower().replace("_", "") return _inner def sort_objects(objects: List[Any], key: Optional[Callable[[Any], str]] = None) -> List[Any]: """ Sort a list of objects following the rules of isort (all uppercased first, camel-cased second and lower-cased last). Args: objects (`List[Any]`): The list of objects to sort. key (`Callable[[Any], str]`, *optional*): A function taking an object as input and returning a string, used to sort them by alphabetical order. If not provided, will default to noop (so a `key` must be provided if the `objects` are not of type string). Returns: `List[Any]`: The sorted list with the same elements as in the inputs """ # If no key is provided, we use a noop. def noop(x): return x if key is None: key = noop # Constants are all uppercase, they go first. constants = [obj for obj in objects if key(obj).isupper()] # Classes are not all uppercase but start with a capital, they go second. classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()] # Functions begin with a lowercase, they go last. functions = [obj for obj in objects if not key(obj)[0].isupper()] # Then we sort each group. key1 = ignore_underscore_and_lowercase(key) return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1) def sort_objects_in_import(import_statement: str) -> str: """ Sorts the imports in a single import statement. Args: import_statement (`str`): The import statement in which to sort the imports. Returns: `str`: The same as the input, but with objects properly sorted. """ # This inner function sort imports between [ ]. def _replace(match): imports = match.groups()[0] # If there is one import only, nothing to do. 
if "," not in imports: return f"[{imports}]" keys = [part.strip().replace('"', "") for part in imports.split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: keys = keys[:-1] return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]" lines = import_statement.split("\n") if len(lines) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. idx = 2 if lines[1].strip() == "[" else 1 keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])] sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1]) sorted_lines = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:]) elif len(lines) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1]) is not None: lines[1] = _re_bracket_content.sub(_replace, lines[1]) else: keys = [part.strip().replace('"', "") for part in lines[1].split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: keys = keys[:-1] lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)]) return "\n".join(lines) else: # Finally we have to deal with imports fitting on one line import_statement = _re_bracket_content.sub(_replace, import_statement) return import_statement def sort_imports(file: str, check_only: bool = True): """ Sort the imports defined in the `_import_structure` of a given init. Args: file (`str`): The path to the init to check/fix. check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init. """ with open(file, encoding="utf-8") as f: code = f.read() # If the file is not a custom init, there is nothing to do. if "_import_structure" not in code or "define_import_structure" in code: return # Blocks of indent level 0 main_blocks = split_code_in_indented_blocks( code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1, len(main_blocks) - 1): # Check if the block contains some `_import_structure`s thingy to sort. block = main_blocks[block_idx] block_lines = block.split("\n") # Get to the start of the imports. line_idx = 0 while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: line_idx = len(block_lines) else: line_idx += 1 if line_idx >= len(block_lines): continue # Ignore beginning and last line: they don't contain anything. internal_block_code = "\n".join(block_lines[line_idx:-1]) indent = get_indent(block_lines[1]) # Slit the internal block into blocks of indent level 1. internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent) # We have two categories of import key: list or _import_structure[key].append/extend pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks] # We only sort the lines with a key. 
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """
    Sort the imports defined in the `_import_structure` of all inits in the repo.

    Args:
        check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
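# --- Illustrative example (editor's addition, not part of the original script) ---
# A minimal sketch of the ordering rule implemented by `sort_objects` above, using
# hypothetical names: constants (all caps) come first, classes (capitalized) second,
# and functions (lowercase) last, each group sorted case-insensitively while
# ignoring underscores:
#
#     sort_objects(["load_tf_weights", "BertModel", "MODEL_MAPPING", "add_start_docstrings"])
#     # -> ["MODEL_MAPPING", "BertModel", "add_start_docstrings", "load_tf_weights"]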
transformers/utils/custom_init_isort.py/0
{ "file_path": "transformers/utils/custom_init_isort.py", "repo_id": "transformers", "token_count": 5360 }
221
import argparse
import os


past_versions_testing = {
    "pytorch": {
        "1.13": {
            "torch": "1.13.1",
            "torchvision": "0.14.1",
            "torchaudio": "0.13.1",
            "python": 3.9,
            "cuda": "cu116",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.13.1 torchvision==0.14.1 torchaudio==0.13.1"
                " --extra-index-url https://download.pytorch.org/whl/cu116"
            ),
            "base_image": "nvidia/cuda:11.6.2-cudnn8-devel-ubuntu20.04",
        },
        "1.12": {
            "torch": "1.12.1",
            "torchvision": "0.13.1",
            "torchaudio": "0.12.1",
            "python": 3.9,
            "cuda": "cu113",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.12.1 torchvision==0.13.1 torchaudio==0.12.1"
                " --extra-index-url https://download.pytorch.org/whl/cu113"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "1.11": {
            "torch": "1.11.0",
            "torchvision": "0.12.0",
            "torchaudio": "0.11.0",
            "python": 3.9,
            "cuda": "cu113",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0"
                " --extra-index-url https://download.pytorch.org/whl/cu113"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "1.10": {
            "torch": "1.10.2",
            "torchvision": "0.11.3",
            "torchaudio": "0.10.2",
            "python": 3.9,
            "cuda": "cu113",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.10.2 torchvision==0.11.3 torchaudio==0.10.2"
                " --extra-index-url https://download.pytorch.org/whl/cu113"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        # torchaudio < 0.10 has no CUDA-enabled binary distributions
        "1.9": {
            "torch": "1.9.1",
            "torchvision": "0.10.1",
            "torchaudio": "0.9.1",
            "python": 3.9,
            "cuda": "cu111",
            "install": (
                "python3 -m pip install --no-cache-dir -U torch==1.9.1 torchvision==0.10.1 torchaudio==0.9.1"
                " --extra-index-url https://download.pytorch.org/whl/cu111"
            ),
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
    },
    "tensorflow": {
        "2.11": {
            "tensorflow": "2.11.1",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.11.1",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.10": {
            "tensorflow": "2.10.1",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.10.1",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.9": {
            "tensorflow": "2.9.3",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.9.3",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.8": {
            "tensorflow": "2.8.2",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.8.2",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.7": {
            "tensorflow": "2.7.3",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.7.3",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.6": {
            "tensorflow": "2.6.5",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.6.5",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
        "2.5": {
            "tensorflow": "2.5.3",
            "install": "python3 -m pip install --no-cache-dir -U tensorflow==2.5.3",
            "base_image": "nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04",
        },
    },
}


if __name__ == "__main__":
    parser = argparse.ArgumentParser("Choose the framework and version to install")
    parser.add_argument(
        "--framework", help="The framework to install. Should be `pytorch` or `tensorflow`", type=str, required=True
    )
    parser.add_argument("--version", help="The version of the framework to install.", type=str, required=True)
    args = parser.parse_args()

    info = past_versions_testing[args.framework][args.version]

    os.system(f'echo "export INSTALL_CMD=\'{info["install"]}\'" >> ~/.profile')
    print(f'echo "export INSTALL_CMD=\'{info["install"]}\'" >> ~/.profile')

    cuda = ""
    if args.framework == "pytorch":
        cuda = info["cuda"]
    os.system(f"echo \"export CUDA='{cuda}'\" >> ~/.profile")
    print(f"echo \"export CUDA='{cuda}'\" >> ~/.profile")
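# Example invocation (editor's sketch; assumes the script is run from the `transformers` repo root):
#     python3 utils/past_ci_versions.py --framework pytorch --version 1.13
# This appends `INSTALL_CMD` and `CUDA` exports to ~/.profile for later CI steps to source.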
transformers/utils/past_ci_versions.py/0
{ "file_path": "transformers/utils/past_ci_versions.py", "repo_id": "transformers", "token_count": 2774 }
222
from transformers import CLIPImageProcessor


class CustomImageProcessor(CLIPImageProcessor):
    pass
transformers/utils/test_module/custom_image_processing.py/0
{ "file_path": "transformers/utils/test_module/custom_image_processing.py", "repo_id": "transformers", "token_count": 29 }
223
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Run benchmark using the `optimum-benchmark` library with some customization in `transformers`.

Assume we are under `transformers` root directory: (make sure the commits are valid commits)

```bash
python benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=9b9c7f03da625b13643e99205c691fe046461724 --metrics=decode.latency.mean,per_token.latency.mean,per_token.throughput.value backend.model=google/gemma-2b benchmark.input_shapes.sequence_length=5,7 benchmark.input_shapes.batch_size=1,2 --multirun
```
"""

import argparse
import glob
import json
import os.path
import re
import tempfile
from contextlib import contextmanager
from pathlib import Path

from git import Repo
from huggingface_hub import HfApi
from optimum_benchmark import Benchmark
from optimum_benchmark_wrapper import main


PATH_TO_REPO = Path(__file__).parent.parent.resolve()


@contextmanager
def checkout_commit(repo: Repo, commit_id: str):
    """
    Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.

    Args:
        repo (`git.Repo`): A git repository (for instance the Transformers repo).
        commit_id (`str`): The commit reference to checkout inside the context manager.
    """
    current_head = repo.head.commit if repo.head.is_detached else repo.head.ref

    try:
        repo.git.checkout(commit_id)
        yield

    finally:
        repo.git.checkout(current_head)


def summarize(run_dir, metrics, expand_metrics=False):
    """Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`.

    Each summary's format is as follows (for `expand_metrics=False`):
    ```
    {
        "model": "google/gemma-2b",
        "commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7",
        "config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5",
        "metrics": {
            "decode.latency.mean": 1.624666809082031,
            "per_token.latency.mean": 0.012843788806628804,
            "per_token.throughput.value": 77.85864553330948
        }
    }
    ```
    """
    reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True)
    report_dirs = [str(Path(report).parent) for report in reports]

    summaries = []
    for report_dir in report_dirs:
        commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0]

        if not os.path.isfile(os.path.join(report_dir, "benchmark.json")):
            continue
        benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json"))
        report = benchmark.report

        model = benchmark.config.backend["model"]

        # This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`.
        # (we rely on the usage of hydra's `${hydra.job.override_dirname}`.)
        benchmark_name = re.sub(f"backend.model={model},*", "", report_dir)
        benchmark_name = str(Path(benchmark_name).parts[-1])
        if benchmark_name.startswith("commit="):
            benchmark_name = benchmark.config.name

        metrics_values = {}
        # post-processing of report: show a few selected/important metrics
        for metric in metrics:
            keys = metric.split(".")
            value = report.to_dict()
            current = metrics_values
            for key in keys:
                # Avoid KeyError when a user's specified metric has a typo.
                # TODO: Give warnings.
                if key not in value:
                    continue
                value = value[key]

                if expand_metrics:
                    if isinstance(value, dict):
                        if key not in current:
                            current[key] = {}
                        current = current[key]
                    else:
                        current[key] = value

            if not expand_metrics:
                metrics_values[metric] = value

        # show some config information
        print(f"model: {model}")
        print(f"commit: {commit}")
        print(f"config: {benchmark_name}")
        if len(metrics_values) > 0:
            print("metrics:")
            if expand_metrics:
                print(metrics_values)
            else:
                for metric, value in metrics_values.items():
                    print(f"    - {metric}: {value}")
        print("-" * 80)

        summary = {
            "model": model,
            "commit": commit,
            "config": benchmark_name,
            "metrics": metrics_values,
        }
        summaries.append(summary)

        with open(os.path.join(report_dir, "summary.json"), "w") as fp:
            json.dump(summary, fp, indent=4)

    return summaries


def combine_summaries(summaries):
    """Combine a list of summaries obtained from the function `summarize`.

    The combined summary's format is as follows:
    ```
    "google/gemma-2b": {
        "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": {
            "3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
                "metrics": {"decode.latency.mean": 1.624666809082031}
            },
            "c97ee28b117c0abe8e08891f402065e4df6d72aa": {
                "metrics": {"decode.latency.mean": 1.6278163452148438}
            }
        },
        "benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": {
            "3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
                "metrics": {"decode.latency.mean": 1.6947791748046876}
            },
            "c97ee28b117c0abe8e08891f402065e4df6d72aa": {
                "metrics": {"decode.latency.mean": 1.6980519409179688}
            }
        }
    }
    ```
    """
    combined = {}
    for summary in summaries:
        model = summary["model"]
        config = summary["config"]
        commit = summary["commit"]

        if model not in combined:
            combined[model] = {}

        if config not in combined[model]:
            combined[model][config] = {}

        if commit not in combined[model][config]:
            combined[model][config][commit] = {"metrics": summary["metrics"]}

    with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp:
        json.dump(combined, fp, indent=4)

    print(json.dumps(combined, indent=4))

    return combined


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()

    parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.")
    parser.add_argument("--config-name", type=str, required=True, help="The config name.")

    # arguments specific to this wrapper for our own customization
    parser.add_argument("--ensure_empty", type=bool, default=True, help="If to create a temporary directory.")
    parser.add_argument(
        "--commit",
        type=list_str,
        default="",
        help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. If `diff` is specified, it will run on both the current head and the `main` branch.",
    )
    parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.")

    parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.")
    parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.")
    parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).")

    args, optimum_benchmark_args = parser.parse_known_args()

    repo = Repo(PATH_TO_REPO)

    metrics = [
        "prefill.latency.mean",
        "prefill.throughput.value",
        "decode.latency.mean",
        "decode.throughput.value",
        "per_token.latency.mean",
        "per_token.throughput.value",
    ]
    if args.metrics is not None:
        metrics = args.metrics.split(",")

    # Get `backend.model` in a hacky way: we want to control the experiment flow manually.
    models = [""]
    for idx, arg in enumerate(optimum_benchmark_args):
        if arg.startswith("backend.model="):
            models = arg[len("backend.model=") :]
            models = models.split(",")
            break
    optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")]

    # Get the commit(s)
    current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref)
    commits = [x for x in args.commit if x != ""]
    if len(commits) == 0:
        commits = [current_head]
    elif len(commits) == 1 and commits[0] == "diff":
        # compare to `main`
        commits = ["main", current_head]

    # Get the specified run directory
    run_dir_arg_idx, run_dir = -1, None
    sweep_dir_arg_idx, sweep_dir = -1, None
    for idx, arg in enumerate(optimum_benchmark_args):
        if arg.startswith("hydra.run.dir="):
            run_dir = arg[len("hydra.run.dir=") :]
            run_dir_arg_idx = idx
        elif arg.startswith("hydra.sweep.dir="):
            sweep_dir = arg[len("hydra.sweep.dir=") :]
            sweep_dir_arg_idx = idx
    exp_run_dir, arg_idx, arg_name = (
        (sweep_dir, sweep_dir_arg_idx, "hydra.sweep.dir")
        if "--multirun" in optimum_benchmark_args
        else (run_dir, run_dir_arg_idx, "hydra.run.dir")
    )

    # TODO: not hardcoded
    if exp_run_dir is None and args.ensure_empty:
        exp_run_dir = "_benchmark"

    if args.ensure_empty:
        os.makedirs(exp_run_dir, exist_ok=True)
        exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir)

    run_summaries = []
    for commit in commits:
        with checkout_commit(repo, commit):
            commit = str(repo.head.commit)

            commit_run_dir = exp_run_dir
            if exp_run_dir is not None:
                commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}")

            print(f"Run benchmark on commit: {commit}")

            for model in models:
                model_arg = [f"backend.model={model}"] if model != "" else []
                dir_args = []
                if commit_run_dir is not None:
                    if arg_idx > -1:
                        optimum_benchmark_args[arg_idx] = f"{arg_name}={commit_run_dir}"
                    else:
                        dir_args = [
                            f"hydra.sweep.dir={commit_run_dir}",
                            f"hydra.run.dir={commit_run_dir}/" + "${hydra.job.override_dirname}",
                        ]
                main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args)

            if commit_run_dir is not None:
                # Need to remove the `\` character
                summaries = summarize(commit_run_dir.replace("\\", ""), metrics)
                run_summaries.extend(summaries)

    # aggregate the information across the commits
    if exp_run_dir is not None:
        with open(os.path.join(exp_run_dir, "summaries.json"), "w") as fp:
            json.dump(run_summaries, fp, indent=4)

        combined_summary = combine_summaries(run_summaries)

        if args.repo_id is not None and args.path_in_repo is not None:
            # Upload to Hub
            api = HfApi()
            api.upload_folder(
                folder_path=exp_run_dir,
                path_in_repo=args.path_in_repo,
                repo_id=args.repo_id,
                repo_type="dataset",
                token=args.token,
            )
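# Example (editor's sketch): compare the current head against `main` (see the `--commit=diff`
# handling above) and upload the aggregated summaries to a dataset repo; the repo id and token
# below are placeholders.
#     python benchmark/benchmark.py --config-dir benchmark/config --config-name generation \
#         --commit=diff backend.model=google/gemma-2b --multirun \
#         --repo_id=your-username/benchmark-results --path_in_repo=runs --token=hf_xxx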
transformers/benchmark/benchmark.py/0
{ "file_path": "transformers/benchmark/benchmark.py", "repo_id": "transformers", "token_count": 5440 }
0
FROM python:3.10-slim
ENV PYTHONDONTWRITEBYTECODE=1
ARG REF=main
USER root

RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr
ENV UV_PYTHON=/usr/local/bin/python
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --no-cache-dir --no-deps timm accelerate
RUN pip install -U --upgrade-strategy eager --no-cache-dir pytesseract python-Levenshtein opencv-python nltk
# RUN uv pip install --no-cache-dir natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels
RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[testing, vision]" 'scikit-learn' 'torch-stft' 'nose' 'dataset'
# RUN git clone https://github.com/facebookresearch/detectron2.git
# RUN python3 -m pip install --no-cache-dir -e detectron2
RUN pip install 'git+https://github.com/facebookresearch/detectron2.git@92ae9f0b92aba5867824b4f12aa06a22a60a45d3'
RUN pip uninstall -y transformers

RUN apt-get clean && rm -rf /var/lib/apt/lists/*
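# Example build (editor's sketch; the image tag is arbitrary and REF defaults to `main`):
#     docker build --build-arg REF=main -t transformers-exotic-models -f docker/exotic-models.dockerfile .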
transformers/docker/exotic-models.dockerfile/0
{ "file_path": "transformers/docker/exotic-models.dockerfile", "repo_id": "transformers", "token_count": 468 }
1
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11
FROM nvcr.io/nvidia/pytorch:23.11-py3
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu121'

RUN apt -y update
RUN apt install -y libaio-dev
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

RUN python3 -m pip uninstall -y torch torchvision torchaudio

# Install **nightly** release PyTorch (flag `--pre`)
# (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.)
# (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops)
RUN python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA

RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]

RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate

# Uninstall `transformer-engine` shipped with the base image
RUN python3 -m pip uninstall -y transformer-engine

# Uninstall `torch-tensorrt` and `apex` shipped with the base image
RUN python3 -m pip uninstall -y torch-tensorrt apex

# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
RUN python3 -m pip uninstall -y deepspeed
# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
# Issue: https://github.com/microsoft/DeepSpeed/issues/2010
# RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
#     DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1

## For `torchdynamo` tests
## (see https://github.com/huggingface/transformers/pull/17765)
#RUN git clone https://github.com/pytorch/functorch
#RUN python3 -m pip install --no-cache-dir ./functorch[aot]
#RUN cd functorch && python3 setup.py develop
#
#RUN git clone https://github.com/pytorch/torchdynamo
#RUN python3 -m pip install -r ./torchdynamo/requirements.txt
#RUN cd torchdynamo && python3 setup.py develop
#
## install TensorRT
#RUN python3 -m pip install --no-cache-dir -U nvidia-pyindex
#RUN python3 -m pip install --no-cache-dir -U nvidia-tensorrt==8.2.4.2
#
## install torch_tensorrt (fx path)
#RUN git clone https://github.com/pytorch/TensorRT.git
#RUN cd TensorRT/py && python3 setup.py install --fx-only

# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop

# Disable for now as deepspeed is not installed above. To be enabled once the issue is fixed.
# RUN python3 -c "from deepspeed.launcher.runner import main"
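# Example build (editor's sketch; the image tag is arbitrary and CUDA must match an available nightly wheel index):
#     docker build --build-arg CUDA=cu121 -t transformers-pytorch-deepspeed-nightly-gpu \
#         -f docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile .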
transformers/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile", "repo_id": "transformers", "token_count": 1028 }
2
# Load pretrained instances with an AutoClass

With so many different Transformer architectures to choose from, it can be challenging to create one for your checkpoint. As part of 🤗 Transformers' core philosophy of making the library easy, simple and flexible to use, an `AutoClass` automatically infers and loads the correct architecture from a given model checkpoint. The `from_pretrained()` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means that if your code works for one checkpoint, it will work with another - as long as it was trained for a similar task - even if the architecture is different.

Remember, architecture refers to the skeleton of the model, and checkpoints are the weights for a given architecture. For example, [BERT](https://huggingface.co/google-bert/bert-base-uncased) is an architecture, while `google-bert/bert-base-uncased` is a checkpoint. "Model" is a general term that can mean either the architecture or the checkpoint.

In this tutorial, you will learn how to:

* Load a pretrained tokenizer
* Load a pretrained image processor
* Load a pretrained feature extractor
* Load a pretrained processor
* Load a pretrained model
* Load a model as a backbone

## AutoTokenizer

Nearly every NLP task begins with a tokenizer. A tokenizer converts text into a form the model can process.

Load a tokenizer with [`AutoTokenizer.from_pretrained`]:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
```

Then tokenize your input as shown below:

```py
>>> sequence = "In a hole in the ground there lived a hobbit."
>>> print(tokenizer(sequence))
{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

## AutoImageProcessor

For vision tasks, an image processor processes the image into the correct input format.

```py
>>> from transformers import AutoImageProcessor

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
```

## AutoBackbone

<div style="text-align: center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stages.png">
    <figcaption class="mt-2 text-center text-sm text-gray-500">The diagram shows the stages of a Swin model.</figcaption>
</div>

[`AutoBackbone`] lets you use pretrained models as backbones to get feature maps from different stages of the backbone. You should specify one of the following parameters in [`~PretrainedConfig.from_pretrained`]:

* `out_indices` is the index of the layer you'd like to get the feature map from
* `out_features` is the name of the layer you'd like to get the feature map from

These parameters can be used interchangeably, but if you use both, make sure they're aligned with each other! If you don't pass any of these parameters, the backbone returns the feature map from the last layer.
<div style="text-align: center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png"> <figcaption class="mt-2 text-center text-sm text-gray-500">صورة توضح خريطة ميزات من المرحلة الأولى للعمود الفقري.</figcaption> </div> على سبيل المثال، في الرسم التخطيطي أعلاه، لإرجاع خريطة الميزات من المرحلة الأولى من العمود الفقري Swin، يمكنك تعيين `out_indices=(1,)`: ```py >>> from transformers import AutoImageProcessor, AutoBackbone >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") >>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,)) >>> inputs = processor(image, return_tensors="pt") >>> outputs = model(**inputs) >>> feature_maps = outputs.feature_maps ``` الآن يمكنك الوصول إلى كائن `feature_maps` من المرحلة الأولى من العمود الفقري: ```py >>> list(feature_maps[0].shape) [1, 96, 56, 56] ``` ## مستخرج الميزات التلقائي (AutoFeatureExtractor) بالنسبة للمهام الصوتية، يقوم مستخرج الميزات بمعالجة إشارة الصوت إلى تنسيق الإدخال الصحيح. قم بتحميل مستخرج ميزات باستخدام [`AutoFeatureExtractor.from_pretrained`]: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained( ... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" ... ) ``` ## المعالج التلقائي (AutoProcessor) تتطلب المهام متعددة الوسائط معالجًا يجمع بين نوعين من أدوات المعالجة المسبقة. على سبيل المثال، يتطلب نموذج [LayoutLMV2](model_doc/layoutlmv2) معالج صور لمعالجة الصور ومُجزّئ لمعالجة النص؛ يجمع المعالج كليهما. قم بتحميل معالج باستخدام [`AutoProcessor.from_pretrained`]: ```py >>> from transformers import AutoProcessor >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") ``` ## النموذج التلقائي (AutoModel) <frameworkcontent> <pt> تسمح لك فئات `AutoModelFor` بتحميل نموذج مُدرب مسبقًا لمهمة معينة (راجع [هنا](model_doc/auto) للحصول على قائمة كاملة بالمهام المتاحة). على سبيل المثال، قم بتحميل نموذج لتصنيف التسلسل باستخدام [`AutoModelForSequenceClassification.from_pretrained`]: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` أعد استخدام نفس نقطة التفتيش لتحميل بنية لمهمة مختلفة: ```py >>> from transformers import AutoModelForTokenClassification >>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip warning={true}> بالنسبة لنماذج PyTorch، تستخدم طريقة `from_pretrained()` `torch.load()` التي تستخدم داخليًا `pickle` والتي يُعرف أنها غير آمنة. بشكل عام، لا تقم مطلقًا بتحميل نموذج قد يكون مصدره مصدرًا غير موثوق به، أو قد يكون تم العبث به. يتم تخفيف هذا الخطر الأمني جزئيًا للنماذج العامة المستضافة على Hub Hugging Face، والتي يتم [فحصها بحثًا عن البرامج الضارة](https://huggingface.co/docs/hub/security-malware) في كل ارتكاب. راجع [توثيق Hub](https://huggingface.co/docs/hub/security) للحصول على أفضل الممارسات مثل [التحقق من التوقيع](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) باستخدام GPG. لا تتأثر نقاط تفتيش TensorFlow و Flax، ويمكن تحميلها داخل بنيات PyTorch باستخدام `from_tf` و `from_flax` kwargs لطريقة `from_pretrained` للتحايل على هذه المشكلة. 
</Tip> بشكل عام، نوصي باستخدام فئة `AutoTokenizer` وفئة `AutoModelFor` لتحميل مثيلات مُدربة مسبقًا من النماذج. سيساعدك هذا في تحميل البنية الصحيحة في كل مرة. في البرنامج التعليمي التالي، تعرف على كيفية استخدام المحلل اللغوي ومعالج الصور ومستخرج الميزات والمعالج الذي تم تحميله حديثًا لمعالجة مجموعة بيانات للضبط الدقيق. </pt> <tf> أخيرًا، تسمح لك فئات `TFAutoModelFor` بتحميل نموذج مُدرب مسبقًا لمهمة معينة (راجع [هنا](model_doc/auto) للحصول على قائمة كاملة بالمهام المتاحة). على سبيل المثال، قم بتحميل نموذج لتصنيف التسلسل باستخدام [`TFAutoModelForSequenceClassification.from_pretrained`]: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` أعد استخدام نفس نقطة التفتيش لتحميل بنية لمهمة مختلفة: ```py >>> from transformers import TFAutoModelForTokenClassification >>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` بشكل عام، نوصي باستخدام فئة `AutoTokenizer` وفئة `TFAutoModelFor` لتحميل نسخ لنماذج مُدربة مسبقًا. سيساعدك هذا في تحميل البنية الصحيحة في كل مرة. في البرنامج التعليمي التالي، ستتعرف على كيفية استخدام المُجزّئ اللغوي ومعالج الصور ومستخرج الميزات والمعالج الذي تم تحميله حديثًا لمعالجة مجموعة بيانات للضبط الدقيق. </tf> </frameworkcontent>
transformers/docs/source/ar/autoclass_tutorial.md/0
{ "file_path": "transformers/docs/source/ar/autoclass_tutorial.md", "repo_id": "transformers", "token_count": 5440 }
3
# Share your model with the world

The last two tutorials showed how to fine-tune a model with PyTorch, Keras, and 🤗 Accelerate for distributed setups. The next step is to share your model with the community! At Hugging Face, we believe in openly sharing knowledge and resources to make artificial intelligence accessible to everyone. We encourage you to share your model with the community to help others save time and resources.

In this tutorial, you will learn two methods for sharing a trained or fine-tuned model on the [Model Hub](https://huggingface.co/models):

- Programmatically push your files to the Hub.
- Drag-and-drop your files to the Hub with the web interface.

<iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>

<Tip>

To share a model with the community, you need an account on [huggingface.co](https://huggingface.co/join). You can also join an existing organization or create a new one.

</Tip>

## Repository features

Each repository on the Model Hub behaves like a typical GitHub repository. Our repositories offer versioning, commit history, and the ability to visualize differences between versions.

The Model Hub's built-in versioning is based on git and [git-lfs](https://git-lfs.github.com/). In other words, you can treat each model as its own repository, enabling greater access control and scalability. Version control allows for revisions, letting you pin a specific version of a model with a commit hash, tag or branch.

Thanks to this feature, you can load a specific model version with the `revision` parameter:

```py
>>> model = AutoModel.from_pretrained(
...     "julien-c/EsperBERTo-small", revision="4c77982"  # tag name, or branch name, or commit hash
... )
```

Files are also easily edited in a repository, and you can view the commit history of these files as well as the differences between versions:

![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png)

## Setup

Before sharing a model to the Hub, you will need your Hugging Face account credentials. If you have access to a terminal, run the following command in the virtual environment where 🤗 Transformers is installed. This will store your access token in your Hugging Face cache folder (`~/.cache/` by default):

```bash
huggingface-cli login
```

If you are using a notebook like Jupyter or Colaboratory, make sure you have the [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library) library installed. This library allows you to interact programmatically with the Hub.

```bash
pip install huggingface_hub
```

Then use `notebook_login` to sign in to the Hub, and follow the link [here](https://huggingface.co/settings/token) to generate a token to log in with:

```py
>>> from huggingface_hub import notebook_login

>>> notebook_login()
```

## Convert a model for all frameworks

To ensure your model can be used by someone working with a different framework, we recommend you convert and upload your model with both PyTorch and TensorFlow checkpoints. While users are still able to load your model from a different framework if you skip this step, it will be slower because 🤗 Transformers will need to convert the checkpoint on the fly.

Converting a checkpoint for another framework is easy. Make sure you have PyTorch and TensorFlow installed (see [here](installation) for installation instructions), and then find the specific model for your task in the other framework.
<frameworkcontent> <pt> حدد `from_tf=True` لتحويل نقطة تحقق من TensorFlow إلى PyTorch: ```py >>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) >>> pt_model.save_pretrained("path/to/awesome-name-you-picked") ``` </pt> <tf> حدد `from_pt=True` لتحويل نقطة تحقق من PyTorch إلى TensorFlow: ```py >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True) ``` بعد ذلك، يمكنك حفظ نموذج TensorFlow الجديد بنقطة التحقق الجديدة: ```py >>> tf_model.save_pretrained("path/to/awesome-name-you-picked") ``` </tf> <jax> إذا كان النموذج متاحًا في Flax، فيمكنك أيضًا تحويل نقطة تحقق من PyTorch إلى Flax: ```py >>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( ... "path/to/awesome-name-you-picked", from_pt=True ... ) ``` </jax> </frameworkcontent> ## دفع نموذج أثناء التدريب <frameworkcontent> <pt> <Youtube id="Z1-XMy-GNLQ"/> مشاركة نموذجك على Hub مر بسيط للغاية كل ما عليك هو إضافة معلمة أو استدعاء رد إضافي. كما تذكر من درس [التدريب الدقيق](training)، فإن فئة [`TrainingArguments`] هي المكان الذي تحدد فيه المعلمات الفائقة وخيارات التدريب الإضافية. تشمل إحدى خيارات التدريب هذه القدرة على دفع النموذج مباشرة إلى المنصة Hub. قم بتعيين `push_to_hub=True` في [`TrainingArguments`]: ```py >>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True) ``` مرر معامﻻت التدريب كالمعتاد إلى [`Trainer`]: ```py >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=small_train_dataset, ... eval_dataset=small_eval_dataset, ... compute_metrics=compute_metrics, ... ) ``` بعد ضبط نموذجك بدقة، يمكنك استخدام دالة [`~transformers.Trainer.push_to_hub`] المتاحة في [`Trainer`] لدفع النموذج المدرب إلى المنصة Hub. سوف تضيف 🤗 Transformers تلقائيًا المعلمات الفائقة المستخدمة في التدريب ونتائج التدريب وإصدارات الإطار إلى بطاقة معلومات النموذج الخاصة بك! ```py >>> trainer.push_to_hub() ``` </pt> <tf> شارك نموذجًا على Hub باستخدام [`PushToHubCallback`]. في دالة [`PushToHubCallback`], أضف: - دليل إخراج لنموذجك. - مُجزّئ اللغوي. - `hub_model_id`، والذي هو اسم مستخدم Hub واسم النموذج الخاص بك. ```py >>> from transformers import PushToHubCallback >>> push_to_hub_callback = PushToHubCallback( ... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" ... ) ``` أضف الاستدعاء إلى [`fit`](https://keras.io/api/models/model_training_apis/)، وسيقوم 🤗 Transformers بدفع النموذج المدرب إلى Hub: ```py >>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) ``` </tf> </frameworkcontent> ## استخدام دالة `push_to_hub` يمكنك أيضًا استدعاء `push_to_hub` مباشرة على نموذجك لتحميله إلى Hub. حدد اسم نموذجك في `push_to_hub`: ```py >>> pt_model.push_to_hub("my-awesome-model") ``` ينشئ هذا مستودعًا تحت اسم المستخدم الخاص بك باسم نموذج `my-awesome-model`. يمكن للمستخدمين الآن تحميل نموذجك باستخدام دالة `from_pretrained`: ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("your_username/my-awesome-model") ``` ```py >>> from transformers import AutoModel >>> model = AutoModel.from_pretrained("your_username/my-awesome-model") ``` إذا كنت تنتمي إلى منظمة وتريد دفع نموذجك تحت اسم المنظمة بدلاً من ذلك، فما عليك سوى إضافته إلى `repo_id`: ```py >>> pt_model.push_to_hub("my-awesome-org/my-awesome-model") ``` يمكن أيضًا استخدام دالة `push_to_hub` لإضافة ملفات أخرى إلى مستودع النماذج. 
For example, add a tokenizer to a model repository:

```py
>>> tokenizer.push_to_hub("my-awesome-model")
```

Or perhaps you'd like to add the TensorFlow version of your fine-tuned PyTorch model:

```py
>>> tf_model.push_to_hub("my-awesome-model")
```

Now when you navigate to your Hugging Face profile, you should see your newly created model repository. Clicking on the **Files** tab will display all the files you've uploaded to the repository.

For more details on how to create and upload files to a repository, refer to the Hub documentation [here](https://huggingface.co/docs/hub/how-to-upstream).

## Upload with the web interface

Users who prefer a no-code approach can upload a model through the Hub's web interface. Visit [huggingface.co/new](https://huggingface.co/new) to create a new repository:

![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png)

From here, add some information about your model:

- Select the **owner** of the repository. This can be yourself or any of the organizations you belong to.
- Pick a name for your model, which will also be the repository name.
- Choose whether your model is public or private.
- Specify the license usage for your model.

Now click on the **Files** tab and click on the **Add file** button to upload a new file to your repository. Then drag-and-drop a file to upload and add a commit message.

![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png)

## Add a model card

To make sure users understand your model's capabilities, limitations, potential biases and ethical considerations, please add a model card to your repository. The model card is defined in the `README.md` file. You can add a model card by:

* Manually creating and uploading a `README.md` file.
* Clicking on the **Edit model card** button in your model repository.

Take a look at the DistilBert [model card](https://huggingface.co/distilbert/distilbert-base-uncased) for a good example of the type of information a model card should include. For more details about other options you can control in the `README.md` file, such as a model's carbon footprint or widget examples, refer to the documentation [here](https://huggingface.co/docs/hub/models-cards).
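If you prefer to script repository uploads directly, a minimal sketch using the `huggingface_hub` client can upload a model card file programmatically (the repository id below is a placeholder):

```py
>>> from huggingface_hub import HfApi

>>> api = HfApi()
>>> api.upload_file(
...     path_or_fileobj="README.md",  # local file to upload
...     path_in_repo="README.md",  # destination path in the repository
...     repo_id="your_username/my-awesome-model",  # placeholder repository id
... )
```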
transformers/docs/source/ar/model_sharing.md/0
{ "file_path": "transformers/docs/source/ar/model_sharing.md", "repo_id": "transformers", "token_count": 6706 }
4
# What 🤗 Transformers can do

🤗 Transformers is a library of best-in-class pretrained models for natural language processing (NLP), computer vision, and audio and speech processing tasks. Not only does the library contain Transformer models, it also includes non-Transformer models like modern convolutional networks for computer vision tasks. If you look at some of the most popular consumer products today, like smartphones, apps, and televisions, odds are that some kind of deep learning technology is behind them. Want to remove an object from the background of a picture taken with your smartphone? This is an example of a panoptic segmentation task (don't worry if you don't know what this means yet, we'll describe it in the following sections!).

This page provides an overview of the different speech and audio, computer vision, and NLP tasks that can be solved with the 🤗 Transformers library in just three lines of code!

## Audio

Audio and speech processing tasks are a little different from the other modalities, mainly because audio as an input is a continuous signal. Unlike text, a raw audio waveform can't be neatly split into discrete chunks the way a sentence can be divided into words. To get around this, the raw audio signal is typically sampled at regular intervals. The more samples taken within an interval, the higher the sampling rate, and the more closely the audio resembles the original audio source.

Previous approaches preprocessed the audio to extract useful features from it. It is now more common to start audio and speech processing tasks by directly feeding the raw audio waveform to a feature encoder to extract an audio representation. This simplifies the preprocessing step and allows the model to learn the most essential features.

### Audio classification

Audio classification is a task that labels audio data from a predefined set of classes. It is a broad category with many specific applications, some of which include:

* acoustic scene classification: label audio with a scene label ("office", "beach", "stadium")
* acoustic event detection: label audio with a sound event label ("car horn", "whale calling", "glass breaking")
* tagging: label audio containing multiple sounds (birdsongs, speaker identification in a meeting)
* music classification: label music with a genre label ("metal", "hip-hop", "country")

```py
>>> from transformers import pipeline

>>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er")
>>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4532, 'label': 'hap'},
 {'score': 0.3622, 'label': 'sad'},
 {'score': 0.0943, 'label': 'neu'},
 {'score': 0.0903, 'label': 'ang'}]
```

### Automatic speech recognition

Automatic speech recognition (ASR) transcribes speech into text. It is one of the most common audio tasks, due in part to speech being such a natural form of human communication. Today, ASR systems are embedded in "smart" technology products like speakers, phones, and cars. We can ask our virtual assistants to play music, set reminders, and tell us the weather.

But one of the key challenges Transformer architectures have helped overcome is handling low-resource languages. By pretraining on large amounts of speech data, fine-tuning the model on only one hour of labeled speech data in a low-resource language can still produce high-quality results compared to previous ASR systems trained on 100x more labeled data.
```py
>>> from transformers import pipeline

>>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small")
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```

## Computer vision

One of the first and most successful computer vision tasks was recognizing images of zip code numbers using a [convolutional neural network (CNN)](glossary#convolution). An image is composed of pixels, and each pixel has a numerical value. This makes it easy to represent an image as a matrix of pixel values. Each particular combination of pixel values describes the colors of an image.

Two general ways computer vision tasks can be solved are:

1. Use convolutions to learn the hierarchical features of an image, from low-level features up to high-level abstract things.
2. Split an image into patches and use a Transformer to gradually learn how each image patch is related to the others to form an image. Unlike the bottom-up approach favored by a CNN, this is kind of like starting out with a blurry image and then gradually bringing it into focus.

### Image classification

Image classification labels an entire image from a predefined set of classes. Like most classification tasks, there are many practical use cases for image classification, some of which include:

* healthcare: label medical images to detect disease or monitor patient health
* environment: label satellite images to monitor deforestation, inform wildland management, or detect wildfires
* agriculture: label images of crops to monitor plant health, or satellite images for land use monitoring
* ecology: label images of animal or plant species to monitor wildlife populations or track endangered species

```py
>>> from transformers import pipeline

>>> classifier = pipeline(task="image-classification")
>>> preds = classifier(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> print(*preds, sep="\n")
{'score': 0.4335, 'label': 'lynx, catamount'}
{'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}
{'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}
{'score': 0.0239, 'label': 'Egyptian cat'}
{'score': 0.0229, 'label': 'tiger cat'}
```

### Object detection

Unlike image classification, object detection identifies multiple objects within an image and the objects' positions in the image (defined by bounding boxes). Some applications of object detection include:

* self-driving vehicles: detect everyday traffic objects such as other vehicles, pedestrians, and traffic lights
* remote sensing: disaster monitoring, urban planning, and weather forecasting
* defect detection: detect cracks or structural damage in buildings, and manufacturing defects

```py
>>> from transformers import pipeline

>>> detector = pipeline(task="object-detection")
>>> preds = detector(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds]
>>> preds
[{'score': 0.9865, 'label': 'cat', 'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}]
```

### Image segmentation

Image segmentation is a pixel-level task that assigns every pixel in an image to a class. It differs from object detection, which uses bounding boxes to label and predict objects in an image, because segmentation is more granular. Segmentation can detect objects at a pixel level.
There are several types of image segmentation:

* instance segmentation: in addition to labeling the class of an object, it also labels each distinct instance of an object ("dog-1", "dog-2")
* panoptic segmentation: a combination of semantic segmentation and instance segmentation; it labels each pixel with a semantic class **and** each distinct instance of an object

Segmentation tasks are helpful in self-driving vehicles to create a pixel-level map of the world around them so they can navigate safely around pedestrians and other vehicles. They are also useful in medical imaging, where the task's finer granularity can help identify abnormal cells or organ features. Image segmentation can also be used in e-commerce to virtually try on clothes, or to create augmented reality experiences by overlaying objects onto the real world through your phone's camera.

```py
>>> from transformers import pipeline

>>> segmenter = pipeline(task="image-segmentation")
>>> preds = segmenter(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> print(*preds, sep="\n")
{'score': 0.9879, 'label': 'LABEL_184'}
{'score': 0.9973, 'label': 'snow'}
{'score': 0.9972, 'label': 'cat'}
```

### Depth estimation

Depth estimation predicts the distance of each pixel in an image from the camera. This computer vision task is especially important for scene understanding and reconstruction. For example, in self-driving cars, vehicles need to understand how far away objects like pedestrians, traffic signs, and other vehicles are in order to avoid obstacles and collisions. Depth information is also helpful for constructing 3D representations from 2D images, and it can be used to create high-quality 3D representations of biological structures or buildings.

There are two approaches to depth estimation:

* stereo: depths are estimated by comparing two images of the same scene from slightly different angles
* monocular: depths are estimated from a single image

```py
>>> from transformers import pipeline

>>> depth_estimator = pipeline(task="depth-estimation")
>>> preds = depth_estimator(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
```

## Natural language processing

NLP tasks are among the most common types of tasks because text is such a natural way for us to communicate. To get text into a format recognized by a model, it must first be converted into numerical form. This means splitting a sequence of text into separate words or subwords (tokens), and then converting these tokens into numbers. As a result, you can represent a sequence of text as a sequence of numbers, and once you have a sequence of numbers, it can be input into a model to solve all sorts of NLP tasks!

### Text classification

Like classification tasks in any other modality, text classification labels a sequence of text (it can be a sentence, a paragraph, or a document) from a predefined set of classes. There are many practical applications for text classification, some of which include:

* sentiment analysis: label text according to some polarity like `positive` or `negative`, which can inform and support decision-making in fields like politics, finance, and marketing
* content classification: label text according to some topic to help organize and filter information in news and social media feeds (`weather`, `sports`, `finance`, etc.)
```py
>>> from transformers import pipeline

>>> classifier = pipeline(task="sentiment-analysis")
>>> preds = classifier("Hugging Face is the best thing since sliced bread!")
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.9991, 'label': 'POSITIVE'}]
```

### Token classification

In any NLP task, text is preprocessed by splitting the sequence of text into individual words or subwords known as [tokens](glossary#token). Token classification assigns each token a label from a predefined set of classes.

Two common types of token classification are:

* named entity recognition (NER): label a token according to an entity category like organization, person, location, or date. NER is especially popular in biomedical settings, where it can label genes, proteins, and drug names.
* part-of-speech tagging (POS): label a token according to its grammatical role, such as noun, verb, or adjective. POS is useful for helping translation systems understand how two grammatically identical words differ (for example, the same word used as a noun versus as a verb).

```py
>>> from transformers import pipeline

>>> classifier = pipeline(task="ner")
>>> preds = classifier("Hugging Face is a French company based in New York City.")
>>> preds = [
...     {
...         "entity": pred["entity"],
...         "score": round(pred["score"], 4),
...         "index": pred["index"],
...         "word": pred["word"],
...         "start": pred["start"],
...         "end": pred["end"],
...     }
...     for pred in preds
... ]
>>> print(*preds, sep="\n")
{'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2}
{'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7}
{'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12}
{'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24}
{'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45}
{'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50}
{'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55}
```

### Question answering

Question answering is another token-level task that returns an answer to a question, sometimes with context (open-domain) and other times without context (closed-domain). This task happens whenever we ask a virtual assistant something, like whether a restaurant is open. It can also provide customer or technical support, and it helps search engines retrieve the relevant information we're looking for.

There are two common types of question answering:

* extractive: given a question and some context, the answer is a span of text extracted from the context that the model analyzes
* abstractive: given a question and some context, the answer is generated from the context; this approach is handled by the [`Text2TextGenerationPipeline`] instead of the [`QuestionAnsweringPipeline`] shown below

```py
>>> from transformers import pipeline

>>> question_answerer = pipeline(task="question-answering")
>>> preds = question_answerer(
...     question="What is the name of the repository?",
...     context="The name of the repository is huggingface/transformers",
... )
>>> print(
...     f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}"
... )
score: 0.9327, start: 30, end: 54, answer: huggingface/transformers
```

### Summarization

Summarization creates a shorter version of a longer text while trying to preserve most of the meaning of the original document.
Summarization is a sequence-to-sequence task; it outputs a shorter text sequence than the input. There are lots of long-form documents that can be summarized to help readers quickly understand the main points. Legislative bills, legal and financial documents, patents, and scientific papers are a few examples of documents that could be summarized to save readers time and serve as a reading aid.

Like question answering, there are two types of summarization:

* extractive: identify and extract the most important sentences from the original text
* abstractive: generate the target summary (which may include new words not in the original text) from the original text; the [`SummarizationPipeline`] uses the abstractive approach

```py
>>> from transformers import pipeline

>>> summarizer = pipeline(task="summarization")
>>> summarizer(
...     "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles."
... )
[{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}]
```

### Translation

Translation converts a sequence of text in one language to another. It is important in helping people from different backgrounds communicate with each other, in helping content reach wider audiences, and it can even serve as a learning tool to help people learn a new language. Along with summarization, translation is a sequence-to-sequence task, meaning the model receives an input sequence and returns a target output sequence.

In the early days, translation models were mostly monolingual, but recently there has been increasing interest in multilingual models that can translate between many pairs of languages.

```py
>>> from transformers import pipeline

>>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning."
>>> translator = pipeline(task="translation", model="google-t5/t5-small")
>>> translator(text)
[{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}]
```

### Language modeling

Language modeling is the task of predicting the next word in a sequence of text. It has become a very popular NLP task because a pretrained language model can be fine-tuned for many other downstream tasks. Lately, there has been a lot of interest in large language models (LLMs), which demonstrate zero-shot or few-shot learning. This means the model can solve tasks it wasn't explicitly trained to do! Language models can be used to generate fluent and convincing text, though you should be careful, since the text may not always be accurate.

There are two types of language modeling:

* causal: the model's objective is to predict the next token in the sequence, and future tokens are masked

```py
>>> from transformers import pipeline

>>> prompt = "Hugging Face is a community-based open-source platform for machine learning."
>>> generator = pipeline(task="text-generation") >>> generator(prompt) # doctest: +SKIP ``` * المقنّع (Masked): هدف النموذج هو التنبؤ برمز مُخفيّ ضمن التسلسل مع الوصول الكامل إلى الرموز الأخرى في التسلسل ```py >>> text = "Hugging Face is a community-based open-source <mask> for machine learning." >>> fill_mask = pipeline(task="fill-mask") >>> preds = fill_mask(text, top_k=1) >>> preds = [ ... { ... "score": round(pred["score"], 4), ... "token": pred["token"], ... "token_str": pred["token_str"], ... "sequence": pred["sequence"], ... } ... for pred in preds ... ] >>> preds [{'score': 0.2236, 'token': 1761, 'token_str': ' platform', 'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}] ``` ## متعدد الوسائط: تتطلب المهام متعددة الوسائط (Multimodal) من النموذج معالجة وسائط بيانات متعددة (نص أو صورة أو صوت أو فيديو) لحل مشكلة معينة. يعد وصف الصورة (Image Captioning) مثالاً على مهمة متعددة الوسائط حيث يأخذ النموذج صورة كمدخل وينتج تسلسل نصيًا يصف الصورة أو بعض خصائصها. على الرغم من أن النماذج متعددة الوسائط تعمل مع أنواع أو وسائط بيانات مختلفة، إلا أن خطوات المعالجة المسبقة تساعد النموذج داخليًا على تحويل جميع أنواع البيانات إلى متجهات تضمين (Embeddings) (متجهات أو قوائم من الأرقام التي تحتوي على معلومات ذات معنى حول البيانات). بالنسبة لمهمة مثل وصف الصورة، يتعلم النموذج العلاقات بين متجهات تضمين الصور ومتجهات تضمين النص. ### الإجابة على أسئلة المستندات: الإجابة على أسئلة المستندات (Document Question Answering) هي مهمة تقوم بالإجابة على أسئلة اللغة الطبيعية من مستند مُعطى. على عكس مهمة الإجابة على الأسئلة على مستوى الرموز (Token-Level) التي تأخذ نصًا كمدخل، فإن الإجابة على أسئلة المستندات تأخذ صورة لمستند كمدخل بالإضافة إلى سؤال هذا حول المستند وتعيد الإجابة. يمكن استخدام الإجابة على أسئلة المستندات لتفسير المستندات المُنسّقة واستخراج المعلومات الرئيسية منها. في المثال أدناه، يمكن استخراج المبلغ الإجمالي والمبلغ المُسترد من إيصال الدفع.. ```py >>> from transformers import pipeline >>> from PIL import Image >>> import requests >>> url = "https://huggingface.co/datasets/hf-internal-testing/example-documents/resolve/main/jpeg_images/2.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> doc_question_answerer = pipeline("document-question-answering", model="magorshunov/layoutlm-invoices") >>> preds = doc_question_answerer( ... question="ما هو المبلغ الإجمالي؟", ... image=image, ... ) >>> preds [{'score': 0.8531, 'answer': '17,000', 'start': 4, 'end': 4}] ``` نأمل أن تكون هذه الصفحة قد زودتك ببعض المعلومات الأساسية حول جميع أنواع المهام في كل طريقة وأهمية كل منها العملية. في القسم التالي، ستتعلم كيف تعمل مكتبة 🤗 Transformers لحل هذه المهام.
# Troubleshooting

Errors happen sometimes, but we are here to help! This guide covers some of the most common issues we've seen and how to resolve them. However, this guide isn't meant to be a comprehensive collection of every 🤗 Transformers issue. For more help with troubleshooting your issue, try:

<Youtube id="S2EEG3JIt2A"/>

1. Asking for help on the [forums](https://discuss.huggingface.co/). There are specific categories you can post your question to, like [Beginners](https://discuss.huggingface.co/c/beginners/5) or [🤗 Transformers](https://discuss.huggingface.co/c/transformers/9). Make sure you write a clear, descriptive forum post with some reproducible code to maximize the likelihood that your problem is solved!

<Youtube id="_PAli-V4wj0"/>

2. Creating an [Issue](https://github.com/huggingface/transformers/issues/new/choose) on the 🤗 Transformers repository if it is a bug related to the library. Try to include as much information describing the bug as possible to help us better figure out what's wrong and how we can fix it.

3. Checking the [Migration](migration) guide if you use an older version of the 🤗 Transformers library, since some important changes have been introduced between versions.

For more details about troubleshooting and getting help, take a look at [Chapter 8](https://huggingface.co/course/chapter8/1?fw=pt) of the Hugging Face course.

## Firewalled environments

Some GPU instances on the cloud and some intranet setups are firewalled from external connections, resulting in a connection error. When your script attempts to download model weights or datasets, the download will hang and then time out with an error like:

```
ValueError: Connection error, and we cannot find the requested files in the cached path. Please try again or make sure your Internet connection is on.
```

In this case, you should try to run 🤗 Transformers in [offline mode](installation#offline-mode) (for example, by setting the environment variable `TRANSFORMERS_OFFLINE=1`) to avoid the connection error.

## CUDA out of memory

Training large models with millions of parameters can be challenging without the appropriate hardware. A common error you may run into when the GPU runs out of memory is:

```
CUDA out of memory. Tried to allocate 256.00 MiB (GPU 0; 11.17 GiB total capacity; 9.70 GiB already allocated; 179.81 MiB free; 9.85 GiB reserved in total by PyTorch)
```

Here are some potential solutions you can try to reduce memory use:

- Reduce the [`per_device_train_batch_size`](main_classes/trainer#transformers.TrainingArguments.per_device_train_batch_size) value in [`TrainingArguments`].
- Try using [`gradient_accumulation_steps`](main_classes/trainer#transformers.TrainingArguments.gradient_accumulation_steps) in [`TrainingArguments`] to effectively increase the overall batch size.

<Tip>

Refer to the Performance [guide](performance) for more details about memory-saving techniques.

</Tip>
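As a concrete illustration, here is a minimal sketch combining both settings (the values are illustrative and should be tuned to your hardware):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="my_model",
    per_device_train_batch_size=1,  # smaller per-device batch to fit in GPU memory
    gradient_accumulation_steps=8,  # accumulate gradients for an effective batch size of 8
)
```

The effective batch size is the product of the two values, so you can trade a larger `gradient_accumulation_steps` for a smaller `per_device_train_batch_size` without changing the overall optimization behavior much.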
## Unable to load a saved TensorFlow model

TensorFlow's [model.save](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) method saves the entire model - architecture, weights, and training configuration - in a single file. However, when you load the model file again, you may run into an error because 🤗 Transformers may not load all the TensorFlow-related objects in the model file. To avoid issues with saving and loading TensorFlow models, we recommend you:

- Save the model weights as a `h5` file with [`model.save_weights`](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) and then reload the model with [`~TFPreTrainedModel.from_pretrained`]:

```python
>>> from transformers import TFPreTrainedModel
>>> from tensorflow import keras

>>> model.save_weights("some_folder/tf_model.h5")
>>> model = TFPreTrainedModel.from_pretrained("some_folder")
```

- Save the model with [`~TFPreTrainedModel.save_pretrained`] and load it again with [`~TFPreTrainedModel.from_pretrained`]:

```python
>>> from transformers import TFPreTrainedModel

>>> model.save_pretrained("path_to/model")
>>> model = TFPreTrainedModel.from_pretrained("path_to/model")
```

## ImportError

Another common error you may encounter, especially with a newly released model, is `ImportError`:

```
ImportError: cannot import name 'ImageGPTImageProcessor' from 'transformers' (unknown location)
```

For these error types, check that you have the latest version of the Hugging Face Transformers library installed to access the most recent models:

```bash
pip install transformers --upgrade
```

## CUDA error: device-side assert triggered

Sometimes you may run into a generic CUDA error about an error in the device code.

```
RuntimeError: CUDA error: device-side assert triggered
```

You should try running the code on a CPU first to get a more descriptive error message. Add the following environment variable at the beginning of your code to switch to a CPU:

```python
>>> import os

>>> os.environ["CUDA_VISIBLE_DEVICES"] = ""
```

Another option is to get a better traceback from the GPU. Add the following environment variable at the beginning of your code to get the traceback to point to the source of the error:

```python
>>> import os

>>> os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
```

## Incorrect output when padding tokens aren't masked

In some cases, the output `hidden_state` may be incorrect if the `input_ids` include padding tokens. To demonstrate this, load a model and tokenizer. You can access a model's `pad_token_id` to see its value. The `pad_token_id` may be `None` for some models, but you can always set it manually.

```python
>>> from transformers import AutoModelForSequenceClassification
>>> import torch

>>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")
>>> model.config.pad_token_id
0
```

The following example shows the output without masking the padding tokens:

```python
>>> input_ids = torch.tensor([[7592, 2057, 2097, 2393, 9611, 2115], [7592, 0, 0, 0, 0, 0]])
>>> output = model(input_ids)
>>> print(output.logits)
tensor([[ 0.0082, -0.2307],
        [ 0.1317, -0.1683]], grad_fn=<AddmmBackward0>)
```

Here is the actual output of the second sequence:

```python
>>> input_ids = torch.tensor([[7592]])
>>> output = model(input_ids)
>>> print(output.logits)
tensor([[-0.1008, -0.4061]], grad_fn=<AddmmBackward0>)
```

Most of the time, you should provide an `attention_mask` to your model to ignore the padding tokens and avoid this silent error. Now the output of the second sequence matches its actual output:

<Tip>

By default, the tokenizer creates an `attention_mask` for you based on the settings of the specific tokenizer.

</Tip>

```python
>>> attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0]])
>>> output = model(input_ids, attention_mask=attention_mask)
>>> print(output.logits)
tensor([[ 0.0082, -0.2307],
        [-0.1008, -0.4061]], grad_fn=<AddmmBackward0>)
```

🤗 Transformers doesn't automatically create an `attention_mask` to mask a padding token if one is provided because:

- Some models don't have a padding token.
- For some use cases, users want the model to attend to a padding token.
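To see the automatically generated mask for yourself, here is a minimal sketch using the same checkpoint's tokenizer (the example sentences are illustrative):

```python
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> batch = tokenizer(["Hello, we will help fix this", "Hello"], padding=True, return_tensors="pt")
>>> batch["attention_mask"]  # 1 marks real tokens, 0 marks padding  # doctest: +SKIP
```

Passing `model(**batch)` then forwards the `attention_mask` together with the `input_ids`, so the padding positions are ignored.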
## ValueError: Unrecognized configuration class XYZ for this kind of AutoModel

Generally, we recommend using the [`AutoModel`] class to load pretrained instances of models. This class can automatically infer and load the correct architecture from a given checkpoint based on its configuration. If you see this `ValueError` when loading a model from a checkpoint, it means the Auto class couldn't find a mapping from the configuration in the given checkpoint to the kind of model you are trying to load. Most commonly, this happens when a checkpoint doesn't support a given task. For instance, you'll see this error in the following example because there is no GPT2 for question answering:

```py
>>> from transformers import AutoProcessor, AutoModelForQuestionAnswering

>>> processor = AutoProcessor.from_pretrained("openai-community/gpt2-medium")
>>> model = AutoModelForQuestionAnswering.from_pretrained("openai-community/gpt2-medium")
ValueError: Unrecognized configuration class <class 'transformers.models.gpt2.configuration_gpt2.GPT2Config'> for this kind of AutoModel: AutoModelForQuestionAnswering.
Model type should be one of AlbertConfig, BartConfig, BertConfig, BigBirdConfig, BigBirdPegasusConfig, BloomConfig, ...
```
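One quick way to check which architectures a checkpoint does support is to inspect its configuration. A minimal sketch with [`AutoConfig`] (the printed value is what the Hub configuration for this checkpoint reports):

```python
>>> from transformers import AutoConfig

>>> config = AutoConfig.from_pretrained("openai-community/gpt2-medium")
>>> config.architectures  # no question answering architecture listed
['GPT2LMHeadModel']
```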
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Quick tour

[[open-in-colab]]

Get up and running with 🤗 Transformers! Use the [`pipeline`] for rapid inference, and quickly load a pretrained model and tokenizer with an [AutoClass](./model_doc/auto) to solve your text, vision, or audio task.

<Tip>

All code examples presented in the documentation have a toggle for PyTorch and TensorFlow in the top left. If not, the code is expected to work for both backends without any changes.

</Tip>

## Pipeline

The [`pipeline`] is the easiest way to use a pretrained model for a given task.

<Youtube id="tiZFewofSLM"/>

The [`pipeline`] supports many common tasks:

**Text**:
* Sentiment analysis: classify the polarity of a given text.
* Text generation (in English): generate text from a given input.
* Named entity recognition (NER): label each word with the entity it represents (person, date, location, etc.).
* Question answering: extract the answer from the context, given some context and a question.
* Fill-mask: fill in the blanks in a text with masked words.
* Summarization: generate a summary of a long sequence of text or a document.
* Translation: translate a text into another language.
* Feature extraction: create a tensor representation of the text.

**Image**:
* Image classification: classify an image.
* Image segmentation: classify every pixel in an image.
* Object detection: detect objects within an image.

**Audio**:
* Audio classification: assign a label to a given segment of audio.
* Automatic speech recognition (ASR): transcribe audio data into text.

<Tip>

For more details about the [`pipeline`] and associated tasks, refer to the documentation [here](./main_classes/pipelines).

</Tip>

### Pipeline usage

In the following example, you will use the [`pipeline`] for sentiment analysis.

Install the following dependencies if you haven't already:

<frameworkcontent>
<pt>

```bash
pip install torch
```
</pt>
<tf>

```bash
pip install tensorflow
```
</tf>
</frameworkcontent>

Import the [`pipeline`] and specify the task you want to solve:

```py
>>> from transformers import pipeline

>>> classifier = pipeline("sentiment-analysis")
```

The pipeline downloads and caches a default [pretrained model](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis.
Now you can apply the `classifier` to your target text:

```py
>>> classifier("We are very happy to show you the 🤗 Transformers library.")
[{'label': 'POSITIVE', 'score': 0.9998}]
```

For more than one sentence, pass a list of sentences to the [`pipeline`] which returns a list of dictionaries:

```py
>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."])
>>> for result in results:
...     print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
label: POSITIVE, with score: 0.9998
label: NEGATIVE, with score: 0.5309
```

The [`pipeline`] can also iterate over an entire dataset. Start by installing the [🤗 Datasets](https://huggingface.co/docs/datasets/) library:

```bash
pip install datasets
```

Create a [`pipeline`] with the task you want to solve and the model you want to use.

```py
>>> import torch
>>> from transformers import pipeline

>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```

Next, load the dataset you'd like to use (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) for more details). For example, let's load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset:

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT
```

We need to make sure the sampling rate of the dataset matches the sampling rate `facebook/wav2vec2-base-960h` was trained on.

```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))
```

Audio files are automatically loaded and resampled when the `"audio"` column is accessed. Let's extract the raw waveform arrays of the first 4 samples and pass them as a list to the pipeline:

```py
>>> result = speech_recognizer(dataset[:4]["audio"])
>>> print([d["text"] for d in result])
['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT']
```

For a larger dataset where the inputs are big (as in speech or vision), you'll want to pass a generator instead of a list so you don't load all the inputs into memory at once. Take a look at the [pipeline documentation](./main_classes/pipelines) for more information.

### Use another model and tokenizer in the pipeline

The [`pipeline`] can accommodate any model from the [Model Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use cases. For example, if you'd like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) fine-tuned for sentiment analysis. Great, let's use this model!
```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```

<frameworkcontent>
<pt>
Use [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on an `AutoClass` below):

```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</pt>
<tf>
Use [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on a `TFAutoClass` below):

```py
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</tf>
</frameworkcontent>

Then you can specify the model and tokenizer in the [`pipeline`] and apply the `classifier` to your target text:

```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```

If you can't find a model for your use case, you'll need to fine-tune a pretrained model on your data. Take a look at our [fine-tuning tutorial](./training) to learn how. Finally, after you've fine-tuned your model, please consider sharing it with the community on the Model Hub (see the tutorial [here](./model_sharing)) to democratize NLP for everyone! 🤗

## AutoClass

<Youtube id="AhChOFRegn4"/>

Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`]. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate `AutoClass` for your task and the associated tokenizer with [`AutoTokenizer`].

Let's return to our example and see how you can use the `AutoClass` to replicate the results of the [`pipeline`].

### AutoTokenizer

A tokenizer is responsible for preprocessing text into a format the model can understand. First, the tokenizer splits the text into words called *tokens*. There are several rules that govern the tokenization process, such as how and at which level a word is split (learn more about tokenization [here](./tokenizer_summary)). The most important thing to remember, though, is that you need to instantiate the tokenizer with the same model name to ensure you're using the same tokenization rules the model was pretrained with.

Load a tokenizer with [`AutoTokenizer`]:

```py
>>> from transformers import AutoTokenizer

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```

Next, the tokenizer converts the tokens into numbers to construct a tensor as input to the model. This is known as the model's *vocabulary*.
Pass your text to the tokenizer:

```py
>>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.")
>>> print(encoding)
{'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

The tokenizer returns a dictionary containing:

* [input_ids](./glossary#input-ids): numerical representations of your tokens.
* [attention_mask](./glossary#attention-mask): indicates which tokens should be attended to.

Just like the [`pipeline`], the tokenizer accepts a list of inputs. In addition, the tokenizer can also pad and truncate the text to return a batch of uniform length:

<frameworkcontent>
<pt>

```py
>>> pt_batch = tokenizer(
...     ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
...     padding=True,
...     truncation=True,
...     max_length=512,
...     return_tensors="pt",
... )
```
</pt>
<tf>

```py
>>> tf_batch = tokenizer(
...     ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
...     padding=True,
...     truncation=True,
...     max_length=512,
...     return_tensors="tf",
... )
```
</tf>
</frameworkcontent>

Read the [preprocessing](./preprocessing) tutorial for more details about tokenization.

### AutoModel

<frameworkcontent>
<pt>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. Since you are doing text (or sequence) classification, load [`AutoModelForSequenceClassification`]:

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```

<Tip>

See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task.

</Tip>

Now you can pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`:

```py
>>> pt_outputs = pt_model(**pt_batch)
```

The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:

```py
>>> from torch import nn

>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
        [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>)
```
</pt>
<tf>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load a [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task.
Since you are doing text (or sequence) classification, load [`TFAutoModelForSequenceClassification`]:

```py
>>> from transformers import TFAutoModelForSequenceClassification

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
```

<Tip>

See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task.

</Tip>

Now you can pass your preprocessed batch of inputs directly to the model by passing the dictionary keys directly to the tensors:

```py
>>> tf_outputs = tf_model(tf_batch)
```

The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:

```py
>>> import tensorflow as tf

>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
>>> tf_predictions # doctest: +IGNORE_RESULT
```
</tf>
</frameworkcontent>

<Tip>

All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors *before* the final activation function (like softmax) because the final activation function is often fused with the loss.

</Tip>

Models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model), so you can use them in your usual training loop. However, to make things easier, 🤗 Transformers provides a [`Trainer`] class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the `fit` method from [Keras](https://keras.io/). Refer to the [training tutorial](./training) for more details.

<Tip>

Transformers model outputs are special dataclasses, so their attributes are autocompleted in an IDE. The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice, or a string), in which case attributes that are `None` are ignored.

</Tip>

### Save a model

<frameworkcontent>
<pt>
Once your model is fine-tuned, you can save it with its tokenizer using [`PreTrainedModel.save_pretrained`]:

```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```

When you are ready to use the model again, reload it with [`PreTrainedModel.from_pretrained`]:

```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Once your model is fine-tuned, you can save it with its tokenizer using [`TFPreTrainedModel.save_pretrained`]:

```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```

When you are ready to use the model again, reload it with [`TFPreTrainedModel.from_pretrained`]:

```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>

One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or a TensorFlow model.
The `from_pt` or `from_tf` parameter can convert the model from one framework to the other:

<frameworkcontent>
<pt>

```py
>>> from transformers import AutoModelForSequenceClassification

>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</pt>
<tf>

```py
>>> from transformers import TFAutoModelForSequenceClassification

>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</tf>
</frameworkcontent>

## Custom model builds

You can modify the model's configuration class to change how a model is built. The configuration specifies a model's attributes, such as the number of hidden layers or attention heads. You start from scratch when you initialize a model from a custom configuration class: the model attributes are randomly initialized, and you'll need to train the model before you can use it to get meaningful results.

Start by importing [`AutoConfig`], and then load the pretrained model you want to modify. Within [`AutoConfig.from_pretrained`], you can specify the attribute you want to change, such as the number of attention heads:

```py
>>> from transformers import AutoConfig

>>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12)
```

<frameworkcontent>
<pt>
Create a model from your custom configuration with [`AutoModel.from_config`]:

```py
>>> from transformers import AutoModel

>>> my_model = AutoModel.from_config(my_config)
```
</pt>
<tf>
Create a model from your custom configuration with [`TFAutoModel.from_config`]:

```py
>>> from transformers import TFAutoModel

>>> my_model = TFAutoModel.from_config(my_config)
```
</tf>
</frameworkcontent>

Take a look at the [Create a custom architecture](./create_a_model) guide for more information about building custom configurations.

## What's next?

Now that you've completed the 🤗 Transformers quick tour, check out our guides to learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and training a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our conceptual guides!
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Instantiate a big model

A barrier to accessing very large pretrained models is the amount of memory required. When loading a pretrained PyTorch model, you usually:

1. Create a model with random weights.
2. Load your pretrained weights.
3. Put those pretrained weights in the model.

The first two steps both require a full version of the model in memory, and if the model weighs several GBs, you may not have enough memory for two copies of it. This problem is amplified in distributed training environments because each process loads a pretrained model and stores two copies in memory.

> [!TIP]
> The randomly created model is initialized with "empty" tensors, which take space in memory without filling it. The random values are whatever was in this chunk of memory at the time. To improve loading speed, the [`_fast_init`](https://github.com/huggingface/transformers/blob/c9f6e5e35156e068b227dd9b15521767f6afd4d2/src/transformers/modeling_utils.py#L2710) parameter is set to `True` by default to skip the random initialization for all weights that are correctly loaded.

This guide will show you how Transformers can help you load large pretrained models despite their memory requirements.

## Sharded checkpoints

From Transformers v4.18.0, a checkpoint larger than 10GB is automatically sharded by the [`~PreTrainedModel.save_pretrained`] method. It is split into several smaller partial checkpoints, and an index file is created that maps parameter names to the files they're stored in.

The maximum shard size is controlled with the `max_shard_size` parameter; by default it is 5GB, because a sharded model is easier to run on free-tier GPU instances without running out of memory.

For example, let's shard [BioMistral/BioMistral-7B](https://hf.co/BioMistral/BioMistral-7B).

```py
>>> import os
>>> import tempfile
>>> from transformers import AutoModel

>>> model = AutoModel.from_pretrained("BioMistral/BioMistral-7B")
>>> with tempfile.TemporaryDirectory() as tmp_dir:
...     model.save_pretrained(tmp_dir, max_shard_size="5GB")
...     print(sorted(os.listdir(tmp_dir)))
['config.json', 'generation_config.json', 'model-00001-of-00006.safetensors', 'model-00002-of-00006.safetensors', 'model-00003-of-00006.safetensors', 'model-00004-of-00006.safetensors', 'model-00005-of-00006.safetensors', 'model-00006-of-00006.safetensors', 'model.safetensors.index.json']
```

The sharded checkpoint is reloaded with the [`~PreTrainedModel.from_pretrained`] method.

```py
>>> with tempfile.TemporaryDirectory() as tmp_dir:
...     model.save_pretrained(tmp_dir, max_shard_size="5GB")
...     new_model = AutoModel.from_pretrained(tmp_dir)
```

The main advantage of sharded checkpoints for big models is that each shard is loaded after the previous one, which caps the memory usage to only the model size and the largest shard size.
You could also directly load a sharded checkpoint inside a model without the [`~PreTrainedModel.from_pretrained`] method (similar to PyTorch's `load_state_dict()` method for a full checkpoint). In this case, use the [`~modeling_utils.load_sharded_checkpoint`] method. ```py >>> from transformers.modeling_utils import load_sharded_checkpoint >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="5GB") ... load_sharded_checkpoint(model, tmp_dir) ``` ### Shard metadata The index file determines which keys are in the checkpoint and where the corresponding weights are stored. This file is loaded like any other JSON file and you can get a dictionary from it. ```py >>> import json >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="5GB") ... with open(os.path.join(tmp_dir, "model.safetensors.index.json"), "r") as f: ... index = json.load(f) >>> print(index.keys()) dict_keys(['metadata', 'weight_map']) ``` The `metadata` key provides the total model size. ```py >>> index["metadata"] {'total_size': 28966928384} ``` The `weight_map` key maps each parameter name (typically `state_dict` in a PyTorch model) to the shard it's stored in. ```py >>> index["weight_map"] {'lm_head.weight': 'model-00006-of-00006.safetensors', 'model.embed_tokens.weight': 'model-00001-of-00006.safetensors', 'model.layers.0.input_layernorm.weight': 'model-00001-of-00006.safetensors', 'model.layers.0.mlp.down_proj.weight': 'model-00001-of-00006.safetensors', ... } ``` ## Accelerate's Big Model Inference > [!TIP] > Make sure you have Accelerate v0.9.0 or later and PyTorch v1.9.0 or later installed. From Transformers v4.20.0, the [`~PreTrainedModel.from_pretrained`] method is supercharged with Accelerate's [Big Model Inference](https://hf.co/docs/accelerate/usage_guides/big_modeling) feature to efficiently handle really big models! Big Model Inference creates a *model skeleton* on PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. The randomly initialized parameters are only created when the pretrained weights are loaded. This way, you aren't keeping two copies of the model in memory at the same time (one for the randomly initialized model and one for the pretrained weights), and the maximum memory consumed is only the full model size. To enable Big Model Inference in Transformers, set `low_cpu_mem_usage=True` in the [`~PreTrainedModel.from_pretrained`] method. ```py from transformers import AutoModelForCausalLM gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", low_cpu_mem_usage=True) ``` Accelerate automatically dispatches the model weights across all available devices, starting with the fastest device (GPU) first and then offloading to the slower devices (CPU and even hard drive). This is enabled by setting `device_map="auto"` in the [`~PreTrainedModel.from_pretrained`] method. When you pass the `device_map` parameter, `low_cpu_mem_usage` is automatically set to `True` so you don't need to specify it. ```py from transformers import AutoModelForCausalLM # these loading methods are equivalent gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto") gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", low_cpu_mem_usage=True) ``` You can also write your own `device_map` by mapping each layer to a device. 
It should map all model parameters to a device, but you don't have to detail where all the submodules of a layer go if the entire layer is on the same device.

```python
device_map = {"model.layers.1": 0, "model.layers.14": 1, "model.layers.31": "cpu", "lm_head": "disk"}
```

Access the `hf_device_map` attribute to see how Accelerate split the model across devices.

```py
gemma.hf_device_map
```

```python out
{'model.embed_tokens': 0,
 'model.layers.0': 0,
 'model.layers.1': 0,
 'model.layers.2': 0,
 'model.layers.3': 0,
 'model.layers.4': 0,
 'model.layers.5': 0,
 'model.layers.6': 0,
 'model.layers.7': 0,
 'model.layers.8': 0,
 'model.layers.9': 0,
 'model.layers.10': 0,
 'model.layers.11': 0,
 'model.layers.12': 0,
 'model.layers.13': 0,
 'model.layers.14': 'cpu',
 'model.layers.15': 'cpu',
 'model.layers.16': 'cpu',
 'model.layers.17': 'cpu',
 'model.layers.18': 'cpu',
 'model.layers.19': 'cpu',
 'model.layers.20': 'cpu',
 'model.layers.21': 'cpu',
 'model.layers.22': 'cpu',
 'model.layers.23': 'cpu',
 'model.layers.24': 'cpu',
 'model.layers.25': 'cpu',
 'model.layers.26': 'cpu',
 'model.layers.27': 'cpu',
 'model.layers.28': 'cpu',
 'model.layers.29': 'cpu',
 'model.layers.30': 'cpu',
 'model.layers.31': 'cpu',
 'model.norm': 'cpu',
 'lm_head': 'cpu'}
```

## Model data type

PyTorch model weights are normally instantiated as torch.float32, which can be a problem if you try to load a model in a different data type. For example, you'd need twice as much memory to load the weights in torch.float32 and then again to load them in your desired data type, like torch.float16.

> [!WARNING]
> Due to how PyTorch is designed, the `torch_dtype` parameter only supports floating data types.

To avoid wasting memory like this, explicitly set the `torch_dtype` parameter to the desired data type, or set `torch_dtype="auto"` to load the weights with the most optimal memory pattern (the data type is automatically derived from the model weights).

<hfoptions id="dtype">
<hfoption id="specific dtype">

```py
import torch
from transformers import AutoModelForCausalLM

gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype=torch.float16)
```

</hfoption>
<hfoption id="auto dtype">

```py
from transformers import AutoModelForCausalLM

gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype="auto")
```

</hfoption>
</hfoptions>

You can also set the data type to use for models instantiated from scratch.

```python
import torch
from transformers import AutoConfig, AutoModel

my_config = AutoConfig.from_pretrained("google/gemma-2b", torch_dtype=torch.float16)
model = AutoModel.from_config(my_config)
```
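To sanity-check how much memory a given `torch_dtype` actually saves, you can compare model footprints with the [`~PreTrainedModel.get_memory_footprint`] helper. A minimal sketch (loading the same checkpoint twice is only for illustration and is itself memory-hungry):

```py
import torch
from transformers import AutoModelForCausalLM

# load the same checkpoint in two precisions and compare footprints
model_fp32 = AutoModelForCausalLM.from_pretrained("google/gemma-7b")
model_fp16 = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype=torch.float16)

print(f"fp32: {model_fp32.get_memory_footprint() / 1e9:.2f} GB")
print(f"fp16: {model_fp16.get_memory_footprint() / 1e9:.2f} GB")  # roughly half of fp32
```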
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Backbone A backbone is a model used for feature extraction for higher level computer vision tasks such as object detection and image classification. Transformers provides an [`AutoBackbone`] class for initializing a Transformers backbone from pretrained model weights, and two utility classes: * [`~utils.BackboneMixin`] enables initializing a backbone from Transformers or [timm](https://hf.co/docs/timm/index) and includes functions for returning the output features and indices. * [`~utils.BackboneConfigMixin`] sets the output features and indices of the backbone configuration. [timm](https://hf.co/docs/timm/index) models are loaded with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes. Backbones are supported for the following models: * [BEiT](../model_doc/beit) * [BiT](../model_doc/bit) * [ConvNext](../model_doc/convnext) * [ConvNextV2](../model_doc/convnextv2) * [DiNAT](../model_doc/dinat) * [DINOV2](../model_doc/dinov2) * [FocalNet](../model_doc/focalnet) * [MaskFormer](../model_doc/maskformer) * [NAT](../model_doc/nat) * [ResNet](../model_doc/resnet) * [Swin Transformer](../model_doc/swin) * [Swin Transformer v2](../model_doc/swinv2) * [ViTDet](../model_doc/vitdet) ## AutoBackbone [[autodoc]] AutoBackbone ## BackboneMixin [[autodoc]] utils.BackboneMixin ## BackboneConfigMixin [[autodoc]] utils.BackboneConfigMixin ## TimmBackbone [[autodoc]] models.timm_backbone.TimmBackbone ## TimmBackboneConfig [[autodoc]] models.timm_backbone.TimmBackboneConfig
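As a quick illustration of the classes above, here is a minimal sketch of initializing a backbone with [`AutoBackbone`] and extracting multi-scale feature maps (the checkpoint and the `out_indices` values are illustrative):

```py
import torch
from transformers import AutoBackbone

# out_indices selects which stages' feature maps to return
backbone = AutoBackbone.from_pretrained("microsoft/resnet-50", out_indices=[1, 2, 3, 4])

pixel_values = torch.randn(1, 3, 224, 224)  # dummy image batch
outputs = backbone(pixel_values)

for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one tensor per requested stage
```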
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Quantization Quantization techniques reduce memory and computational costs by representing weights and activations with lower-precision data types like 8-bit integers (int8). This enables loading larger models you normally wouldn't be able to fit into memory, and speeding up inference. Transformers supports the AWQ and GPTQ quantization algorithms and it supports 8-bit and 4-bit quantization with bitsandbytes. Quantization techniques that aren't supported in Transformers can be added with the [`HfQuantizer`] class. <Tip> Learn how to quantize models in the [Quantization](../quantization) guide. </Tip> ## QuantoConfig [[autodoc]] QuantoConfig ## AqlmConfig [[autodoc]] AqlmConfig ## VptqConfig [[autodoc]] VptqConfig ## AwqConfig [[autodoc]] AwqConfig ## EetqConfig [[autodoc]] EetqConfig ## GPTQConfig [[autodoc]] GPTQConfig ## BitsAndBytesConfig [[autodoc]] BitsAndBytesConfig ## HfQuantizer [[autodoc]] quantizers.base.HfQuantizer ## HiggsConfig [[autodoc]] HiggsConfig ## HqqConfig [[autodoc]] HqqConfig ## FbgemmFp8Config [[autodoc]] FbgemmFp8Config ## CompressedTensorsConfig [[autodoc]] CompressedTensorsConfig ## TorchAoConfig [[autodoc]] TorchAoConfig ## BitNetConfig [[autodoc]] BitNetConfig
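As a quick illustration of how these configuration classes are used, here is a minimal sketch of 4-bit loading with [`BitsAndBytesConfig`] (the checkpoint name is illustrative, and the `bitsandbytes` library must be installed):

```py
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # quantize weights to 4-bit on load
    bnb_4bit_quant_type="nf4",              # NormalFloat4 quantization
    bnb_4bit_compute_dtype=torch.bfloat16,  # dtype used for computation
)

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    quantization_config=quantization_config,
)
```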
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BEiT ## Overview The BEiT model was proposed in [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong and Furu Wei. Inspired by BERT, BEiT is the first paper that makes self-supervised pre-training of Vision Transformers (ViTs) outperform supervised pre-training. Rather than pre-training the model to predict the class of an image (as done in the [original ViT paper](https://arxiv.org/abs/2010.11929)), BEiT models are pre-trained to predict visual tokens from the codebook of OpenAI's [DALL-E model](https://arxiv.org/abs/2102.12092) given masked patches. The abstract from the paper is the following: *We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%).* This model was contributed by [nielsr](https://huggingface.co/nielsr). The JAX/FLAX version of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit). ## Usage tips - BEiT models are regular Vision Transformers, but pre-trained in a self-supervised way rather than supervised. They outperform both the [original model (ViT)](vit) as well as [Data-efficient Image Transformers (DeiT)](deit) when fine-tuned on ImageNet-1K and CIFAR-100. 
You can check out demo notebooks regarding inference as well as fine-tuning on custom data [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer) (you can just replace [`ViTFeatureExtractor`] by [`BeitImageProcessor`] and [`ViTForImageClassification`] by [`BeitForImageClassification`]).
- There's also a demo notebook available which showcases how to combine DALL-E's image tokenizer with BEiT for performing masked image modeling. You can find it [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BEiT).
- As the BEiT models expect each image to be of the same size (resolution), one can use [`BeitImageProcessor`] to resize (or rescale) and normalize images for the model.
- Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `microsoft/beit-base-patch16-224` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=microsoft/beit).
- The available checkpoints are either (1) pre-trained on [ImageNet-22k](http://www.image-net.org/) (a collection of 14 million images and 22k classes) only, (2) also fine-tuned on ImageNet-22k or (3) also fine-tuned on [ImageNet-1k](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes).
- BEiT uses relative position embeddings, inspired by the T5 model. During pre-training, the authors shared the relative position bias among the several self-attention layers. During fine-tuning, each layer's relative position bias is initialized with the shared relative position bias obtained after pre-training. Note that, if one wants to pre-train a model from scratch, one needs to either set the `use_relative_position_bias` or the `use_absolute_position_embeddings` attribute of [`BeitConfig`] to `True` in order to add position embeddings.

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/beit_architecture.jpg" alt="drawing" width="600"/>

<small> BEiT pre-training. Taken from the <a href="https://arxiv.org/abs/2106.08254">original paper.</a> </small>

### Using Scaled Dot Product Attention (SDPA)

PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information.

SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.

```py
import torch
from transformers import BeitForImageClassification

model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224", attn_implementation="sdpa", torch_dtype=torch.float16)
...
```

For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
On a local benchmark (NVIDIA GeForce RTX 2060-8GB, PyTorch 2.5.1, OS Ubuntu 20.04) with `float16` and `microsoft/beit-base-patch16-224` model, we saw the following improvements during training and inference: #### Training | num_training_steps | batch_size | image_size | is_cuda | Time per batch (eager - s) | Time per batch (sdpa - s) | Speedup (%) | Eager peak mem (MB) | SDPA peak mem (MB) | Mem saving (%) | |--------------------|------------|--------------|---------|----------------------------|---------------------------|-------------|----------------------|--------------------|----------------| | 50 | 2 | (1048, 640) | True | 0.984 | 0.746 | 31.975 | 6738.915 | 4319.886 | 55.998 | #### Inference | Image batch size | Eager (s/iter) | Eager CI, % | Eager memory (MB) | SDPA (s/iter) | SDPA CI, % | SDPA memory (MB) | SDPA speedup | SDPA memory saved (%) | |-------------------:|-----------------:|:--------------|--------------------:|----------------:|:-------------|-------------------:|---------------:|----------------------:| | 1 | 0.012 | ±0.3% | 3.76657e+08 | 0.011 | ±0.5% | 3.75739e+08 | 1.05 | 0.244 | | 4 | 0.013 | ±0.1% | 4.03147e+08 | 0.011 | ±0.2% | 3.90554e+08 | 1.178 | 3.225 | | 16 | 0.045 | ±0.1% | 4.96697e+08 | 0.035 | ±0.1% | 4.51232e+08 | 1.304 | 10.076 | | 32 | 0.088 | ±0.1% | 6.24417e+08 | 0.066 | ±0.1% | 5.33488e+08 | 1.325 | 17.044 | ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BEiT. <PipelineTag pipeline="image-classification"/> - [`BeitForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) **Semantic segmentation** - [Semantic segmentation task guide](../tasks/semantic_segmentation) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## BEiT specific outputs [[autodoc]] models.beit.modeling_beit.BeitModelOutputWithPooling [[autodoc]] models.beit.modeling_flax_beit.FlaxBeitModelOutputWithPooling ## BeitConfig [[autodoc]] BeitConfig ## BeitFeatureExtractor [[autodoc]] BeitFeatureExtractor - __call__ - post_process_semantic_segmentation ## BeitImageProcessor [[autodoc]] BeitImageProcessor - preprocess - post_process_semantic_segmentation <frameworkcontent> <pt> ## BeitModel [[autodoc]] BeitModel - forward ## BeitForMaskedImageModeling [[autodoc]] BeitForMaskedImageModeling - forward ## BeitForImageClassification [[autodoc]] BeitForImageClassification - forward ## BeitForSemanticSegmentation [[autodoc]] BeitForSemanticSegmentation - forward </pt> <jax> ## FlaxBeitModel [[autodoc]] FlaxBeitModel - __call__ ## FlaxBeitForMaskedImageModeling [[autodoc]] FlaxBeitForMaskedImageModeling - __call__ ## FlaxBeitForImageClassification [[autodoc]] FlaxBeitForImageClassification - __call__ </jax> </frameworkcontent>
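The usage tips above mention [`BeitImageProcessor`]; as a quick end-to-end illustration, here is a minimal inference sketch with an ImageNet-1k fine-tuned checkpoint (the image URL is the COCO test image commonly used in these docs):

```py
import requests
import torch
from PIL import Image
from transformers import BeitForImageClassification, BeitImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

inputs = processor(images=image, return_tensors="pt")  # resize + normalize
with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```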
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ConvBERT <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=convbert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-convbert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/conv-bert-base"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The ConvBERT model was proposed in [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. The abstract from the paper is the following: *Pre-trained language models like BERT and its variants have recently achieved impressive performance in various natural language understanding tasks. However, BERT heavily relies on the global self-attention block and thus suffers large memory footprint and computation cost. Although all its attention heads query on the whole input sequence for generating the attention map from a global perspective, we observe some heads only need to learn local dependencies, which means the existence of computation redundancy. We therefore propose a novel span-based dynamic convolution to replace these self-attention heads to directly model local dependencies. The novel convolution heads, together with the rest self-attention heads, form a new mixed attention block that is more efficient at both global and local context learning. We equip BERT with this mixed attention design and build a ConvBERT model. Experiments have shown that ConvBERT significantly outperforms BERT and its variants in various downstream tasks, with lower training cost and fewer model parameters. Remarkably, ConvBERTbase model achieves 86.4 GLUE score, 0.7 higher than ELECTRAbase, while using less than 1/4 training cost. Code and pre-trained models will be released.* This model was contributed by [abhishek](https://huggingface.co/abhishek). The original implementation can be found here: https://github.com/yitu-opensource/ConvBert ## Usage tips ConvBERT training tips are similar to those of BERT. For usage tips refer to [BERT documentation](bert). 
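As a quick usage sketch before the resources below, here is how the base model can be loaded for feature extraction (the checkpoint name follows the original implementation's Hub organization):

```py
import torch
from transformers import AutoTokenizer, ConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

last_hidden_states = outputs.last_hidden_state  # (batch, sequence_length, hidden_size)
```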
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## ConvBertConfig [[autodoc]] ConvBertConfig ## ConvBertTokenizer [[autodoc]] ConvBertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## ConvBertTokenizerFast [[autodoc]] ConvBertTokenizerFast <frameworkcontent> <pt> ## ConvBertModel [[autodoc]] ConvBertModel - forward ## ConvBertForMaskedLM [[autodoc]] ConvBertForMaskedLM - forward ## ConvBertForSequenceClassification [[autodoc]] ConvBertForSequenceClassification - forward ## ConvBertForMultipleChoice [[autodoc]] ConvBertForMultipleChoice - forward ## ConvBertForTokenClassification [[autodoc]] ConvBertForTokenClassification - forward ## ConvBertForQuestionAnswering [[autodoc]] ConvBertForQuestionAnswering - forward </pt> <tf> ## TFConvBertModel [[autodoc]] TFConvBertModel - call ## TFConvBertForMaskedLM [[autodoc]] TFConvBertForMaskedLM - call ## TFConvBertForSequenceClassification [[autodoc]] TFConvBertForSequenceClassification - call ## TFConvBertForMultipleChoice [[autodoc]] TFConvBertForMultipleChoice - call ## TFConvBertForTokenClassification [[autodoc]] TFConvBertForTokenClassification - call ## TFConvBertForQuestionAnswering [[autodoc]] TFConvBertForQuestionAnswering - call </tf> </frameworkcontent>
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ELECTRA <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=electra"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-electra-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/electra_large_discriminator_squad2_512"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The ELECTRA model was proposed in the paper [ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators](https://openreview.net/pdf?id=r1xMH1BtvB). ELECTRA is a new pretraining approach which trains two transformer models: the generator and the discriminator. The generator's role is to replace tokens in a sequence, and is therefore trained as a masked language model. The discriminator, which is the model we're interested in, tries to identify which tokens were replaced by the generator in the sequence. The abstract from the paper is the following: *Masked language modeling (MLM) pretraining methods such as BERT corrupt the input by replacing some tokens with [MASK] and then train a model to reconstruct the original tokens. While they produce good results when transferred to downstream NLP tasks, they generally require large amounts of compute to be effective. As an alternative, we propose a more sample-efficient pretraining task called replaced token detection. Instead of masking the input, our approach corrupts it by replacing some tokens with plausible alternatives sampled from a small generator network. Then, instead of training a model that predicts the original identities of the corrupted tokens, we train a discriminative model that predicts whether each token in the corrupted input was replaced by a generator sample or not. Thorough experiments demonstrate this new pretraining task is more efficient than MLM because the task is defined over all input tokens rather than just the small subset that was masked out. As a result, the contextual representations learned by our approach substantially outperform the ones learned by BERT given the same model size, data, and compute. The gains are particularly strong for small models; for example, we train a model on one GPU for 4 days that outperforms GPT (trained using 30x more compute) on the GLUE natural language understanding benchmark. Our approach also works well at scale, where it performs comparably to RoBERTa and XLNet while using less than 1/4 of their compute and outperforms them when using the same amount of compute.* This model was contributed by [lysandre](https://huggingface.co/lysandre). The original code can be found [here](https://github.com/google-research/electra). 
## Usage tips

- ELECTRA is the pretraining approach, so nearly no changes were made to the underlying model, BERT. The only change is the separation of the embedding size and the hidden size: the embedding size is generally smaller, while the hidden size is larger. An additional projection layer (linear) is used to project the embeddings from their embedding size to the hidden size. In the case where the embedding size is the same as the hidden size, no projection layer is used.
- ELECTRA is a transformer model pretrained with the use of another (small) masked language model. That language model takes randomly masked input text and outputs text in which ELECTRA has to predict which tokens are original and which have been replaced. As in GAN training, the small language model is trained for a few steps (but with the original texts as objective, not to fool the ELECTRA model like in a traditional GAN setting), then the ELECTRA model is trained for a few steps.
- The ELECTRA checkpoints saved using [Google Research's implementation](https://github.com/google-research/electra) contain both the generator and discriminator. The conversion script requires the user to specify which model to export into which architecture. Once converted to the HuggingFace format, however, these checkpoints may be loaded into all available ELECTRA models. This means that the discriminator may be loaded in the [`ElectraForMaskedLM`] model, and the generator may be loaded in the [`ElectraForPreTraining`] model (the classification head will be randomly initialized as it doesn't exist in the generator).

## Resources

- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)

## ElectraConfig

[[autodoc]] ElectraConfig

## ElectraTokenizer

[[autodoc]] ElectraTokenizer

## ElectraTokenizerFast

[[autodoc]] ElectraTokenizerFast

## Electra specific outputs

[[autodoc]] models.electra.modeling_electra.ElectraForPreTrainingOutput

[[autodoc]] models.electra.modeling_tf_electra.TFElectraForPreTrainingOutput

<frameworkcontent>
<pt>

## ElectraModel

[[autodoc]] ElectraModel
    - forward

## ElectraForPreTraining

[[autodoc]] ElectraForPreTraining
    - forward

## ElectraForCausalLM

[[autodoc]] ElectraForCausalLM
    - forward

## ElectraForMaskedLM

[[autodoc]] ElectraForMaskedLM
    - forward

## ElectraForSequenceClassification

[[autodoc]] ElectraForSequenceClassification
    - forward

## ElectraForMultipleChoice

[[autodoc]] ElectraForMultipleChoice
    - forward

## ElectraForTokenClassification

[[autodoc]] ElectraForTokenClassification
    - forward

## ElectraForQuestionAnswering

[[autodoc]] ElectraForQuestionAnswering
    - forward

</pt>
<tf>

## TFElectraModel

[[autodoc]] TFElectraModel
    - call

## TFElectraForPreTraining

[[autodoc]] TFElectraForPreTraining
    - call

## TFElectraForMaskedLM

[[autodoc]] TFElectraForMaskedLM
    - call

## TFElectraForSequenceClassification

[[autodoc]] TFElectraForSequenceClassification
    - call

## TFElectraForMultipleChoice

[[autodoc]] TFElectraForMultipleChoice
    - call

## TFElectraForTokenClassification

[[autodoc]] TFElectraForTokenClassification
    - call

## TFElectraForQuestionAnswering

[[autodoc]] TFElectraForQuestionAnswering
    - call

</tf>
<jax>
## FlaxElectraModel [[autodoc]] FlaxElectraModel - __call__ ## FlaxElectraForPreTraining [[autodoc]] FlaxElectraForPreTraining - __call__ ## FlaxElectraForCausalLM [[autodoc]] FlaxElectraForCausalLM - __call__ ## FlaxElectraForMaskedLM [[autodoc]] FlaxElectraForMaskedLM - __call__ ## FlaxElectraForSequenceClassification [[autodoc]] FlaxElectraForSequenceClassification - __call__ ## FlaxElectraForMultipleChoice [[autodoc]] FlaxElectraForMultipleChoice - __call__ ## FlaxElectraForTokenClassification [[autodoc]] FlaxElectraForTokenClassification - __call__ ## FlaxElectraForQuestionAnswering [[autodoc]] FlaxElectraForQuestionAnswering - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/electra.md/0
{ "file_path": "transformers/docs/source/en/model_doc/electra.md", "repo_id": "transformers", "token_count": 2211 }
13
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# GPTSAN-japanese

<Tip warning={true}>

This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2. You can do so by running the following command: `pip install -U transformers==4.40.2`.

</Tip>

## Overview

The GPTSAN-japanese model was released in the repository by Toshiyuki Sakamoto (tanreinama). GPTSAN is a Japanese language model using Switch Transformer. It has the same structure as the model introduced as Prefix LM in the T5 paper, and supports both Text Generation and Masked Language Modeling tasks. These basic tasks can similarly be fine-tuned for translation or summarization.

### Usage example

The `generate()` method can be used to generate text using the GPTSAN-japanese model.

```python
>>> from transformers import AutoModel, AutoTokenizer
>>> import torch

>>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").cuda()
>>> x_tok = tokenizer("は、", prefix_text="織田信長", return_tensors="pt")
>>> torch.manual_seed(0)
>>> gen_tok = model.generate(x_tok.input_ids.cuda(), token_type_ids=x_tok.token_type_ids.cuda(), max_new_tokens=20)
>>> tokenizer.decode(gen_tok[0])
'織田信長は、2004年に『戦国BASARA』のために、豊臣秀吉'
```

## GPTSAN Features

GPTSAN has some unique features. It has the model structure of a Prefix-LM. It works as a shifted Masked Language Model for prefix input tokens. Un-prefixed inputs behave like normal generative models. The Spout vector is a GPTSAN-specific input. Spout is pre-trained with random inputs, but you can specify a class of text or an arbitrary vector during fine-tuning. This allows you to indicate the tendency of the generated text. GPTSAN has a sparse Feed Forward based on Switch-Transformer. You can also add other layers and train them partially. See the original GPTSAN repository for details.

### Prefix-LM Model

GPTSAN has the structure of the model named Prefix-LM in the `T5` paper. (The original GPTSAN repository calls it `hybrid`.) In GPTSAN, the `Prefix` part of the Prefix-LM, that is, the input positions that can be attended to by every token, can be given any length. A different length can also be specified for each batch. This length applies to the text entered in `prefix_text` for the tokenizer. The tokenizer returns the mask of the `Prefix` part of the Prefix-LM as `token_type_ids`. The model treats the part where `token_type_ids` is 1 as the `Prefix` part, that is, those positions can attend to tokens both before and after them.

## Usage tips

Specifying the Prefix part is done with a mask passed to self-attention.
When `token_type_ids=None` or all zero, it is equivalent to a regular causal mask. For example:

```text
>>> x_token = tokenizer("アイウエ")

input_ids:      | SOT | SEG | ア | イ | ウ | エ |
token_type_ids: |  1  |  0  |  0 |  0 |  0 |  0 |
prefix_lm_mask:
SOT | 1 0 0 0 0 0 |
SEG | 1 1 0 0 0 0 |
ア  | 1 1 1 0 0 0 |
イ  | 1 1 1 1 0 0 |
ウ  | 1 1 1 1 1 0 |
エ  | 1 1 1 1 1 1 |
```

```text
>>> x_token = tokenizer("", prefix_text="アイウエ")

input_ids:      | SOT | ア | イ | ウ | エ | SEG |
token_type_ids: |  1  |  1 |  1 |  1 |  1 |  0  |
prefix_lm_mask:
SOT | 1 1 1 1 1 0 |
ア  | 1 1 1 1 1 0 |
イ  | 1 1 1 1 1 0 |
ウ  | 1 1 1 1 1 0 |
エ  | 1 1 1 1 1 0 |
SEG | 1 1 1 1 1 1 |
```

```text
>>> x_token = tokenizer("ウエ", prefix_text="アイ")

input_ids:      | SOT | ア | イ | SEG | ウ | エ |
token_type_ids: |  1  |  1 |  1 |  0  |  0 |  0 |
prefix_lm_mask:
SOT | 1 1 1 0 0 0 |
ア  | 1 1 1 0 0 0 |
イ  | 1 1 1 0 0 0 |
SEG | 1 1 1 1 0 0 |
ウ  | 1 1 1 1 1 0 |
エ  | 1 1 1 1 1 1 |
```

### Spout Vector

A Spout Vector is a special vector for controlling text generation. This vector is treated as the first embedding in self-attention to bring external attention to the generated tokens. In the pre-trained model published from `Tanrei/GPTSAN-japanese`, the Spout Vector is a 128-dimensional vector that passes through 8 fully connected layers in the model and is projected into the space acting as external attention. The Spout Vector projected by the fully connected layers is split to be passed to all self-attentions.

## GPTSanJapaneseConfig

[[autodoc]] GPTSanJapaneseConfig

## GPTSanJapaneseTokenizer

[[autodoc]] GPTSanJapaneseTokenizer

## GPTSanJapaneseModel

[[autodoc]] GPTSanJapaneseModel

## GPTSanJapaneseForConditionalGeneration

[[autodoc]] GPTSanJapaneseForConditionalGeneration
    - forward
transformers/docs/source/en/model_doc/gptsan-japanese.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gptsan-japanese.md", "repo_id": "transformers", "token_count": 1750 }
14
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# ImageGPT

## Overview

The ImageGPT model was proposed in [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. ImageGPT (iGPT) is a GPT-2-like model trained to predict the next pixel value, allowing for both unconditional and conditional image generation.

The abstract from the paper is the following:

*Inspired by progress in unsupervised representation learning for natural language, we examine whether similar models can learn useful representations for images. We train a sequence Transformer to auto-regressively predict pixels, without incorporating knowledge of the 2D input structure. Despite training on low-resolution ImageNet without labels, we find that a GPT-2 scale model learns strong image representations as measured by linear probing, fine-tuning, and low-data classification. On CIFAR-10, we achieve 96.3% accuracy with a linear probe, outperforming a supervised Wide ResNet, and 99.0% accuracy with full fine-tuning, matching the top supervised pre-trained models. We are also competitive with self-supervised benchmarks on ImageNet when substituting pixels for a VQVAE encoding, achieving 69.0% top-1 accuracy on a linear probe of our features.*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/imagegpt_architecture.png" alt="drawing" width="600"/>

<small> Summary of the approach. Taken from the [original paper](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf). </small>

This model was contributed by [nielsr](https://huggingface.co/nielsr), based on [this issue](https://github.com/openai/image-gpt/issues/7). The original code can be found [here](https://github.com/openai/image-gpt).

## Usage tips

- ImageGPT is almost exactly the same as [GPT-2](gpt2), with the exception that a different activation function is used (namely "quick gelu"), and the layer normalization layers don't mean-center the inputs. ImageGPT also doesn't have tied input and output embeddings.
- As the time and memory requirements of the attention mechanism of Transformers scale quadratically in the sequence length, the authors pre-trained ImageGPT on smaller input resolutions, such as 32x32 and 64x64. However, feeding a sequence of 32x32x3=3072 tokens from 0..255 into a Transformer is still prohibitively large. Therefore, the authors applied k-means clustering to the (R,G,B) pixel values with k=512. This way, we only have a 32*32 = 1024-long sequence, but now of integers in the range 0..511. So we are shrinking the sequence length at the cost of a bigger embedding matrix.
In other words, the vocabulary size of ImageGPT is 512, plus 1 for a special "start of sentence" (SOS) token, used at the beginning of every sequence. One can use [`ImageGPTImageProcessor`] to prepare images for the model.
- Despite being pre-trained entirely unsupervised (i.e. without the use of any labels), ImageGPT produces fairly performant image features useful for downstream tasks, such as image classification. The authors showed that the features in the middle of the network are the most performant, and can be used as-is to train a linear model (such as a scikit-learn logistic regression model). This is also referred to as "linear probing". Features can easily be obtained by forwarding the image through the model with `output_hidden_states=True`, and then average-pooling the hidden states at whatever layer you like (a short sketch is given at the end of this page).
- Alternatively, one can further fine-tune the entire model on a downstream dataset, similar to BERT. For this, you can use [`ImageGPTForImageClassification`].
- ImageGPT comes in different sizes: there's ImageGPT-small, ImageGPT-medium and ImageGPT-large. The authors also trained an XL variant, which they didn't release. The differences in size are summarized in the following table:

| **Model variant** | **Layers** | **Hidden size** | **Params (M)** |
|---|---|---|---|
| ImageGPT-small | 24 | 512 | 76 |
| ImageGPT-medium | 36 | 1024 | 455 |
| ImageGPT-large | 48 | 1536 | 1400 |
| ImageGPT-XL (not released) | 60 | 3072 | 6800 |

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ImageGPT.

<PipelineTag pipeline="image-classification"/>

- Demo notebooks for ImageGPT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ImageGPT).
- [`ImageGPTForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

## ImageGPTConfig

[[autodoc]] ImageGPTConfig

## ImageGPTFeatureExtractor

[[autodoc]] ImageGPTFeatureExtractor
    - __call__

## ImageGPTImageProcessor

[[autodoc]] ImageGPTImageProcessor
    - preprocess

## ImageGPTModel

[[autodoc]] ImageGPTModel
    - forward

## ImageGPTForCausalImageModeling

[[autodoc]] ImageGPTForCausalImageModeling
    - forward

## ImageGPTForImageClassification

[[autodoc]] ImageGPTForImageClassification
    - forward
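To illustrate the linear-probing workflow from the usage tips above, here is a minimal feature-extraction sketch. The checkpoint name, image URL and choice of middle layer are illustrative assumptions:

```python
import requests
import torch
from PIL import Image
from transformers import ImageGPTImageProcessor, ImageGPTModel

processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
model = ImageGPTModel.from_pretrained("openai/imagegpt-small")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# The processor maps pixels to the 512 color-cluster ids described in the usage tips
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# Average-pool a middle layer over the sequence dimension to get one vector per image
middle = len(outputs.hidden_states) // 2
features = outputs.hidden_states[middle].mean(dim=1)
print(features.shape)  # (batch_size, hidden_size)
```

These pooled features could then be fed to, for example, a scikit-learn logistic regression classifier for linear probing.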
transformers/docs/source/en/model_doc/imagegpt.md/0
{ "file_path": "transformers/docs/source/en/model_doc/imagegpt.md", "repo_id": "transformers", "token_count": 1915 }
15
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Mask2Former

## Overview

The Mask2Former model was proposed in [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar. Mask2Former is a unified framework for panoptic, instance and semantic segmentation and features significant performance and efficiency improvements over [MaskFormer](maskformer).

The abstract from the paper is the following:

*Image segmentation groups pixels with different semantics, e.g., category or instance membership. Each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K).*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/mask2former_architecture.jpg" alt="drawing" width="600"/>

<small> Mask2Former architecture. Taken from the <a href="https://arxiv.org/abs/2112.01527">original paper.</a> </small>

This model was contributed by [Shivalika Singh](https://huggingface.co/shivi) and [Alara Dirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/facebookresearch/Mask2Former).

## Usage tips

- Mask2Former uses the same preprocessing and postprocessing steps as [MaskFormer](maskformer). Use [`Mask2FormerImageProcessor`] or [`AutoImageProcessor`] to prepare images and optional targets for the model.
- To get the final segmentation, depending on the task, you can call [`~Mask2FormerImageProcessor.post_process_semantic_segmentation`] or [`~Mask2FormerImageProcessor.post_process_instance_segmentation`] or [`~Mask2FormerImageProcessor.post_process_panoptic_segmentation`]. All three tasks can be solved using the [`Mask2FormerForUniversalSegmentation`] output; panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object(s) (e.g. sky) together. A short end-to-end sketch is given at the end of this page.

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Mask2Former.
- Demo notebooks regarding inference + fine-tuning Mask2Former on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Mask2Former). - Scripts for finetuning [`Mask2Former`] with [`Trainer`] or [Accelerate](https://huggingface.co/docs/accelerate/index) can be found [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/instance-segmentation). If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we will review it. The resource should ideally demonstrate something new instead of duplicating an existing resource. ## Mask2FormerConfig [[autodoc]] Mask2FormerConfig ## MaskFormer specific outputs [[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerModelOutput [[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerForUniversalSegmentationOutput ## Mask2FormerModel [[autodoc]] Mask2FormerModel - forward ## Mask2FormerForUniversalSegmentation [[autodoc]] Mask2FormerForUniversalSegmentation - forward ## Mask2FormerImageProcessor [[autodoc]] Mask2FormerImageProcessor - preprocess - encode_inputs - post_process_semantic_segmentation - post_process_instance_segmentation - post_process_panoptic_segmentation
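As an end-to-end illustration of the pre- and post-processing flow described in the usage tips, here is a minimal semantic-segmentation sketch. The checkpoint and image URL are illustrative choices:

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation

# One of the released ADE20K semantic checkpoints
checkpoint = "facebook/mask2former-swin-tiny-ade-semantic"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# One (height, width) tensor of class ids per image in the batch;
# target_sizes expects (height, width), while PIL's image.size is (width, height)
segmentation = processor.post_process_semantic_segmentation(
    outputs, target_sizes=[image.size[::-1]]
)[0]
print(segmentation.shape)
```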
transformers/docs/source/en/model_doc/mask2former.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mask2former.md", "repo_id": "transformers", "token_count": 1301 }
16
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# MobileNet V1

## Overview

The MobileNet model was proposed in [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861) by Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam.

The abstract from the paper is the following:

*We present a class of efficient models called MobileNets for mobile and embedded vision applications. MobileNets are based on a streamlined architecture that uses depth-wise separable convolutions to build light weight deep neural networks. We introduce two simple global hyper-parameters that efficiently trade off between latency and accuracy. These hyper-parameters allow the model builder to choose the right sized model for their application based on the constraints of the problem. We present extensive experiments on resource and accuracy tradeoffs and show strong performance compared to other popular models on ImageNet classification. We then demonstrate the effectiveness of MobileNets across a wide range of applications and use cases including object detection, finegrain classification, face attributes and large scale geo-localization.*

This model was contributed by [matthijs](https://huggingface.co/Matthijs). The original code and weights can be found [here](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md).

## Usage tips

- The checkpoints are named **mobilenet\_v1\_*depth*\_*size***, for example **mobilenet\_v1\_1.0\_224**, where **1.0** is the depth multiplier (sometimes also referred to as "alpha" or the width multiplier) and **224** is the resolution of the input images the model was trained on.
- Even though the checkpoint is trained on images of a specific size, the model will work on images of any size. The smallest supported image size is 32x32.
- One can use [`MobileNetV1ImageProcessor`] to prepare images for the model.
- The available image classification checkpoints are pre-trained on [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). However, the model predicts 1001 classes: the 1000 classes from ImageNet plus an extra "background" class (index 0).
- The original TensorFlow checkpoints use different padding rules than PyTorch, requiring the model to determine the padding amount at inference time, since this depends on the input image size. To use native PyTorch padding behavior, create a [`MobileNetV1Config`] with `tf_padding = False`.

Unsupported features:

- The [`MobileNetV1Model`] outputs a globally pooled version of the last hidden state. In the original model it is possible to use a 7x7 average pooling layer with stride 2 instead of global pooling.
For larger inputs, this gives a pooled output that is larger than 1x1 pixel. The HuggingFace implementation does not support this. - It is currently not possible to specify an `output_stride`. For smaller output strides, the original model invokes dilated convolution to prevent the spatial resolution from being reduced further. The output stride of the HuggingFace model is always 32. - The original TensorFlow checkpoints include quantized models. We do not support these models as they include additional "FakeQuantization" operations to unquantize the weights. - It's common to extract the output from the pointwise layers at indices 5, 11, 12, 13 for downstream purposes. Using `output_hidden_states=True` returns the output from all intermediate layers. There is currently no way to limit this to specific layers. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with MobileNetV1. <PipelineTag pipeline="image-classification"/> - [`MobileNetV1ForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## MobileNetV1Config [[autodoc]] MobileNetV1Config ## MobileNetV1FeatureExtractor [[autodoc]] MobileNetV1FeatureExtractor - preprocess ## MobileNetV1ImageProcessor [[autodoc]] MobileNetV1ImageProcessor - preprocess ## MobileNetV1Model [[autodoc]] MobileNetV1Model - forward ## MobileNetV1ForImageClassification [[autodoc]] MobileNetV1ForImageClassification - forward
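To show the basic classification workflow with the checkpoints described above, here is a minimal inference sketch (the checkpoint and image URL are illustrative; recall that predicted index 0 is the extra "background" class):

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# The model predicts 1001 classes: ImageNet's 1000 labels shifted by one, plus "background"
predicted_class = logits.argmax(-1).item()
print(model.config.id2label[predicted_class])
```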
transformers/docs/source/en/model_doc/mobilenet_v1.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mobilenet_v1.md", "repo_id": "transformers", "token_count": 1403 }
17
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Nemotron ## Nemotron ### License The use of this model is governed by the [NVIDIA AI Foundation Models Community License Agreement](https://developer.nvidia.com/downloads/nv-ai-foundation-models-license). ### Description Nemotron-4 is a family of enterprise ready generative text models compatible with [NVIDIA NeMo Framework](https://www.nvidia.com/en-us/ai-data-science/generative-ai/nemo-framework/). NVIDIA NeMo is an end-to-end, cloud-native platform to build, customize, and deploy generative AI models anywhere. It includes training and inferencing frameworks, guardrailing toolkits, data curation tools, and pretrained models, offering enterprises an easy, cost-effective, and fast way to adopt generative AI. To get access to NeMo Framework, please sign up at [this link](https://developer.nvidia.com/nemo-framework/join). ### References [Announcement Blog](https://developer.nvidia.com/blog/nvidia-ai-foundation-models-build-custom-enterprise-chatbots-and-co-pilots-with-production-ready-llms/) ### Model Architecture **Architecture Type:** Transformer **Network Architecture:** Transformer Decoder (auto-regressive language model). ## Minitron ### Minitron 4B Base Minitron is a family of small language models (SLMs) obtained by pruning NVIDIA's [Nemotron-4 15B](https://arxiv.org/abs/2402.16819) model. We prune model embedding size, attention heads, and MLP intermediate dimension, following which, we perform continued training with distillation to arrive at the final models. Deriving the Minitron 8B and 4B models from the base 15B model using our approach requires up to **40x fewer training tokens** per model compared to training from scratch; this results in **compute cost savings of 1.8x** for training the full model family (15B, 8B, and 4B). Minitron models exhibit up to a 16% improvement in MMLU scores compared to training from scratch, perform comparably to other community models such as Mistral 7B, Gemma 7B and Llama-3 8B, and outperform state-of-the-art compression techniques from the literature. Please refer to our [arXiv paper](https://arxiv.org/abs/2407.14679) for more details. Minitron models are for research and development only. ### HuggingFace Quickstart The following code provides an example of how to load the Minitron-4B model and use it to perform text generation. 
```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM # Load the tokenizer and model model_path = 'nvidia/Minitron-4B-Base' tokenizer = AutoTokenizer.from_pretrained(model_path) device = 'cuda' dtype = torch.bfloat16 model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=dtype, device_map=device) # Prepare the input text prompt = 'Complete the paragraph: our solar system is' inputs = tokenizer.encode(prompt, return_tensors='pt').to(model.device) # Generate the output outputs = model.generate(inputs, max_length=20) # Decode and print the output output_text = tokenizer.decode(outputs[0]) print(output_text) ``` ### License Minitron is released under the [NVIDIA Open Model License Agreement](https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf). ### Evaluation Results *5-shot performance.* Language Understanding evaluated using [Massive Multitask Language Understanding](https://arxiv.org/abs/2009.03300): | Average | | :---- | | 58.6 | *Zero-shot performance.* Evaluated using select datasets from the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) with additions: | HellaSwag | Winogrande | GSM8K| ARC-C | XLSum | | :------------- | :------------- | :------------- | :------------- | :------------- | | 75.0 | 74.0 | 24.1 | 50.9 | 29.5 *Code generation performance*. Evaluated using [HumanEval](https://github.com/openai/human-eval): | p@1, 0-Shot | | :------------- | | 23.3 | Please refer to our [paper](https://arxiv.org/abs/2407.14679) for the full set of results. ### Citation If you find our work helpful, please consider citing our paper: ``` @article{minitron2024, title={Compact Language Models via Pruning and Knowledge Distillation}, author={Saurav Muralidharan and Sharath Turuvekere Sreenivas and Raviraj Joshi and Marcin Chochowski and Mostofa Patwary and Mohammad Shoeybi and Bryan Catanzaro and Jan Kautz and Pavlo Molchanov}, journal={arXiv preprint arXiv:2407.14679}, year={2024}, url={https://arxiv.org/abs/2407.14679}, } ``` ## NemotronConfig [[autodoc]] NemotronConfig ## NemotronModel [[autodoc]] NemotronModel - forward ## NemotronForCausalLM [[autodoc]] NemotronForCausalLM - forward ## NemotronForSequenceClassification [[autodoc]] NemotronForSequenceClassification - forward ## NemotronForQuestionAnswering [[autodoc]] NemotronForQuestionAnswering - forward ## NemotronForTokenClassification [[autodoc]] NemotronForTokenClassification - forward
transformers/docs/source/en/model_doc/nemotron.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nemotron.md", "repo_id": "transformers", "token_count": 1671 }
18
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# PaliGemma

## Overview

The PaliGemma model was proposed in [PaliGemma – Google's Cutting-Edge Open Vision Language Model](https://huggingface.co/blog/paligemma) by Google. It is a 3B vision-language model composed of a [SigLIP](siglip) vision encoder and a [Gemma](gemma) language decoder linked by a multimodal linear projection. It cuts an image into a fixed number of ViT tokens and prepends it to an optional prompt. One particularity is that the model uses full block attention on all the image tokens plus the input text tokens. It comes in 3 resolutions (224x224, 448x448 and 896x896) with 3 base models, 55 fine-tuned versions for different tasks, and 2 mix models.

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/paligemma/paligemma_arch.png" alt="drawing" width="600"/>

<small> PaliGemma architecture. Taken from the <a href="https://huggingface.co/blog/paligemma">blog post.</a> </small>

This model was contributed by [Molbap](https://huggingface.co/Molbap).

## Usage tips

- PaliGemma is not meant for conversational use, and it works best when fine-tuned to a specific use case. Some downstream tasks on which PaliGemma can be fine-tuned include image captioning, visual question answering (VQA), object detection, referring expression segmentation and document understanding.
- One can use `PaliGemmaProcessor` to prepare images, text and optional labels for the model. When fine-tuning a PaliGemma model, the `suffix` argument can be passed to the processor which creates the `labels` for the model:

```python
prompt = "What is on the flower?"
answer = "a bee"
inputs = processor(images=raw_image, text=prompt, suffix=answer, return_tensors="pt")
```

## Usage Example

The model can accept a single or multiple images. According to the [paper](https://arxiv.org/abs/2407.07726v1), PaliGemma checkpoints can transfer to tasks which take multiple images as input. NLVR2 is one such task, which asks one question about two images, and requires looking at both to give the correct answer. Here's example code for single- and multi-image inference.

### Single-image Inference

```python
import requests
from PIL import Image
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

model_id = "google/paligemma-3b-mix-224"
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
processor = AutoProcessor.from_pretrained(model_id)

prompt = "What is on the flower?"
image_file = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg?download=true"
raw_image = Image.open(requests.get(image_file, stream=True).raw)
inputs = processor(raw_image, prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=20)

# Decode only the newly generated tokens, not the prompt
input_len = inputs.input_ids.shape[-1]
print(processor.decode(output[0][input_len:], skip_special_tokens=True))
```

### Multi-image Inference

```python
import requests
from PIL import Image
from transformers import PaliGemmaProcessor, PaliGemmaForConditionalGeneration

model_id = "google/paligemma-3b-ft-nlvr2-448"  # checkpoint tuned for multiple images
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
processor = PaliGemmaProcessor.from_pretrained(model_id)

prompt = "answer en Which of the two pictures shows a snowman, first or second?"
stop_sign_image = Image.open(
    requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw
)
snow_image = Image.open(
    requests.get(
        "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg", stream=True
    ).raw
)

inputs = processor(images=[[snow_image, stop_sign_image]], text=prompt, return_tensors="pt")

output = model.generate(**inputs, max_new_tokens=20)

# Decode only the newly generated tokens, not the prompt
input_len = inputs.input_ids.shape[-1]
print(processor.decode(output[0][input_len:], skip_special_tokens=True))
```

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with PaliGemma. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

- A blog post introducing all the features of PaliGemma can be found [here](https://huggingface.co/blog/paligemma).
- Demo notebooks on how to fine-tune PaliGemma for VQA with the Trainer API along with inference can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/paligemma).
- Demo notebooks on how to fine-tune PaliGemma on a custom dataset (receipt image -> JSON) along with inference can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/PaliGemma). 🌎

## PaliGemmaConfig

[[autodoc]] PaliGemmaConfig

## PaliGemmaProcessor

[[autodoc]] PaliGemmaProcessor

## PaliGemmaForConditionalGeneration

[[autodoc]] PaliGemmaForConditionalGeneration
    - forward
transformers/docs/source/en/model_doc/paligemma.md/0
{ "file_path": "transformers/docs/source/en/model_doc/paligemma.md", "repo_id": "transformers", "token_count": 1694 }
19
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # ProphetNet <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=prophetnet"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-prophetnet-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/prophetnet-large-uncased"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The ProphetNet model was proposed in [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training,](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, Ming Zhou on 13 Jan, 2020. ProphetNet is an encoder-decoder model and can predict n-future tokens for "ngram" language modeling instead of just the next token. The abstract from the paper is the following: *In this paper, we present a new sequence-to-sequence pretraining model called ProphetNet, which introduces a novel self-supervised objective named future n-gram prediction and the proposed n-stream self-attention mechanism. Instead of the optimization of one-step ahead prediction in traditional sequence-to-sequence model, the ProphetNet is optimized by n-step ahead prediction which predicts the next n tokens simultaneously based on previous context tokens at each time step. The future n-gram prediction explicitly encourages the model to plan for the future tokens and prevent overfitting on strong local correlations. We pre-train ProphetNet using a base scale dataset (16GB) and a large scale dataset (160GB) respectively. Then we conduct experiments on CNN/DailyMail, Gigaword, and SQuAD 1.1 benchmarks for abstractive summarization and question generation tasks. Experimental results show that ProphetNet achieves new state-of-the-art results on all these datasets compared to the models using the same scale pretraining corpus.* The Authors' code can be found [here](https://github.com/microsoft/ProphetNet). ## Usage tips - ProphetNet is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - The model architecture is based on the original Transformer, but replaces the “standard” self-attention mechanism in the decoder by a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism. 
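As a quick start, here is a minimal summarization sketch using the CNN/DailyMail fine-tuned checkpoint. The generation settings and the placeholder article text are illustrative choices, not prescribed values:

```python
from transformers import ProphetNetForConditionalGeneration, ProphetNetTokenizer

# Checkpoint fine-tuned on CNN/DailyMail for abstractive summarization
tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")
model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-cnndm")

article = (
    "the cnn/dailymail checkpoint expects lowercased news text as input. "
    "replace this placeholder with the article you want to summarize."
)
inputs = tokenizer(article, max_length=512, truncation=True, return_tensors="pt")

summary_ids = model.generate(
    inputs["input_ids"], num_beams=4, max_length=60, early_stopping=True
)
print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])
```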
## Resources - [Causal language modeling task guide](../tasks/language_modeling) - [Translation task guide](../tasks/translation) - [Summarization task guide](../tasks/summarization) ## ProphetNetConfig [[autodoc]] ProphetNetConfig ## ProphetNetTokenizer [[autodoc]] ProphetNetTokenizer ## ProphetNet specific outputs [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput [[autodoc]] models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput ## ProphetNetModel [[autodoc]] ProphetNetModel - forward ## ProphetNetEncoder [[autodoc]] ProphetNetEncoder - forward ## ProphetNetDecoder [[autodoc]] ProphetNetDecoder - forward ## ProphetNetForConditionalGeneration [[autodoc]] ProphetNetForConditionalGeneration - forward ## ProphetNetForCausalLM [[autodoc]] ProphetNetForCausalLM - forward
transformers/docs/source/en/model_doc/prophetnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/prophetnet.md", "repo_id": "transformers", "token_count": 1169 }
20
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # RetriBERT <Tip warning={true}> This model is in maintenance mode only, so we won't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0. You can do so by running the following command: `pip install -U transformers==4.30.0`. </Tip> ## Overview The RetriBERT model was proposed in the blog post [Explain Anything Like I'm Five: A Model for Open Domain Long Form Question Answering](https://yjernite.github.io/lfqa.html). RetriBERT is a small model that uses either a single or pair of BERT encoders with lower-dimension projection for dense semantic indexing of text. This model was contributed by [yjernite](https://huggingface.co/yjernite). Code to train and use the model can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research-projects/distillation). ## RetriBertConfig [[autodoc]] RetriBertConfig ## RetriBertTokenizer [[autodoc]] RetriBertTokenizer ## RetriBertTokenizerFast [[autodoc]] RetriBertTokenizerFast ## RetriBertModel [[autodoc]] RetriBertModel - forward
transformers/docs/source/en/model_doc/retribert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/retribert.md", "repo_id": "transformers", "token_count": 536 }
21
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Speech2Text ## Overview The Speech2Text model was proposed in [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino. It's a transformer-based seq2seq (encoder-decoder) model designed for end-to-end Automatic Speech Recognition (ASR) and Speech Translation (ST). It uses a convolutional downsampler to reduce the length of speech inputs by 3/4th before they are fed into the encoder. The model is trained with standard autoregressive cross-entropy loss and generates the transcripts/translations autoregressively. Speech2Text has been fine-tuned on several datasets for ASR and ST: [LibriSpeech](http://www.openslr.org/12), [CoVoST 2](https://github.com/facebookresearch/covost), [MuST-C](https://ict.fbk.eu/must-c/). This model was contributed by [valhalla](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/pytorch/fairseq/tree/master/examples/speech_to_text). ## Inference Speech2Text is a speech model that accepts a float tensor of log-mel filter-bank features extracted from the speech signal. It's a transformer-based seq2seq model, so the transcripts/translations are generated autoregressively. The `generate()` method can be used for inference. The [`Speech2TextFeatureExtractor`] class is responsible for extracting the log-mel filter-bank features. The [`Speech2TextProcessor`] wraps [`Speech2TextFeatureExtractor`] and [`Speech2TextTokenizer`] into a single instance to both extract the input features and decode the predicted token ids. The feature extractor depends on `torchaudio` and the tokenizer depends on `sentencepiece` so be sure to install those packages before running the examples. You could either install those as extra speech dependencies with `pip install transformers"[speech, sentencepiece]"` or install the packages separately with `pip install torchaudio sentencepiece`. Also `torchaudio` requires the development version of the [libsndfile](http://www.mega-nerd.com/libsndfile/) package which can be installed via a system package manager. 
On Ubuntu it can be installed as follows: `apt install libsndfile1-dev`

- ASR and Speech Translation

```python
>>> import torch
>>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
>>> from datasets import load_dataset

>>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
>>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

>>> ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")

>>> inputs = processor(ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt")
>>> generated_ids = model.generate(inputs["input_features"], attention_mask=inputs["attention_mask"])

>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> transcription
['mister quilter is the apostle of the middle classes and we are glad to welcome his gospel']
```

- Multilingual speech translation

For multilingual speech translation models, `eos_token_id` is used as the `decoder_start_token_id` and the target language id is forced as the first generated token. To force the target language id as the first generated token, pass the `forced_bos_token_id` parameter to the `generate()` method. The following example shows how to translate English speech to French text using the *facebook/s2t-medium-mustc-multilingual-st* checkpoint.

```python
>>> import torch
>>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
>>> from datasets import load_dataset

>>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")
>>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")

>>> ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")

>>> inputs = processor(ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt")
>>> generated_ids = model.generate(
...     inputs["input_features"],
...     attention_mask=inputs["attention_mask"],
...     forced_bos_token_id=processor.tokenizer.lang_code_to_id["fr"],
... )

>>> translation = processor.batch_decode(generated_ids, skip_special_tokens=True)
>>> translation
["(Vidéo) Si M. Kilder est l'apossible des classes moyennes, et nous sommes heureux d'être accueillis dans son évangile."]
```

See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look for Speech2Text checkpoints.

## Speech2TextConfig

[[autodoc]] Speech2TextConfig

## Speech2TextTokenizer

[[autodoc]] Speech2TextTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## Speech2TextFeatureExtractor

[[autodoc]] Speech2TextFeatureExtractor
    - __call__

## Speech2TextProcessor

[[autodoc]] Speech2TextProcessor
    - __call__
    - from_pretrained
    - save_pretrained
    - batch_decode
    - decode

<frameworkcontent>
<pt>

## Speech2TextModel

[[autodoc]] Speech2TextModel
    - forward

## Speech2TextForConditionalGeneration

[[autodoc]] Speech2TextForConditionalGeneration
    - forward

</pt>
<tf>

## TFSpeech2TextModel

[[autodoc]] TFSpeech2TextModel
    - call

## TFSpeech2TextForConditionalGeneration

[[autodoc]] TFSpeech2TextForConditionalGeneration
    - call

</tf>
</frameworkcontent>
transformers/docs/source/en/model_doc/speech_to_text.md/0
{ "file_path": "transformers/docs/source/en/model_doc/speech_to_text.md", "repo_id": "transformers", "token_count": 1920 }
22
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Table Transformer ## Overview The Table Transformer model was proposed in [PubTables-1M: Towards comprehensive table extraction from unstructured documents](https://arxiv.org/abs/2110.00061) by Brandon Smock, Rohith Pesala, Robin Abraham. The authors introduce a new dataset, PubTables-1M, to benchmark progress in table extraction from unstructured documents, as well as table structure recognition and functional analysis. The authors train 2 [DETR](detr) models, one for table detection and one for table structure recognition, dubbed Table Transformers. The abstract from the paper is the following: *Recently, significant progress has been made applying machine learning to the problem of table structure inference and extraction from unstructured documents. However, one of the greatest challenges remains the creation of datasets with complete, unambiguous ground truth at scale. To address this, we develop a new, more comprehensive dataset for table extraction, called PubTables-1M. PubTables-1M contains nearly one million tables from scientific articles, supports multiple input modalities, and contains detailed header and location information for table structures, making it useful for a wide variety of modeling approaches. It also addresses a significant source of ground truth inconsistency observed in prior datasets called oversegmentation, using a novel canonicalization procedure. We demonstrate that these improvements lead to a significant increase in training performance and a more reliable estimate of model performance at evaluation for table structure recognition. Further, we show that transformer-based object detection models trained on PubTables-1M produce excellent results for all three tasks of detection, structure recognition, and functional analysis without the need for any special customization for these tasks.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/table_transformer_architecture.jpeg" alt="drawing" width="600"/> <small> Table detection and table structure recognition clarified. Taken from the <a href="https://arxiv.org/abs/2110.00061">original paper</a>. </small> The authors released 2 models, one for [table detection](https://huggingface.co/microsoft/table-transformer-detection) in documents, one for [table structure recognition](https://huggingface.co/microsoft/table-transformer-structure-recognition) (the task of recognizing the individual rows, columns etc. in a table). This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/table-transformer). 
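To make the detection workflow concrete, here is a minimal sketch using the released table-detection checkpoint linked above. The local image path is a placeholder, and the 0.9 confidence threshold is an illustrative choice:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

processor = AutoImageProcessor.from_pretrained("microsoft/table-transformer-detection")
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")

# Any document page image with a table will do; "page.png" is a placeholder path
image = Image.open("page.png").convert("RGB")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# DETR-style post-processing: keep detections above a confidence threshold;
# target_sizes expects (height, width), while PIL's image.size is (width, height)
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=target_sizes
)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```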
## Resources <PipelineTag pipeline="object-detection"/> - A demo notebook for the Table Transformer can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Table%20Transformer). - It turns out padding of images is quite important for detection. An interesting Github thread with replies from the authors can be found [here](https://github.com/microsoft/table-transformer/issues/68). ## TableTransformerConfig [[autodoc]] TableTransformerConfig ## TableTransformerModel [[autodoc]] TableTransformerModel - forward ## TableTransformerForObjectDetection [[autodoc]] TableTransformerForObjectDetection - forward
transformers/docs/source/en/model_doc/table-transformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/table-transformer.md", "repo_id": "transformers", "token_count": 978 }
23
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# UniSpeech

## Overview

The UniSpeech model was proposed in [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.

The abstract from the paper is the following:

*In this paper, we propose a unified pre-training approach called UniSpeech to learn speech representations with both unlabeled and labeled data, in which supervised phonetic CTC learning and phonetically-aware contrastive self-supervised learning are conducted in a multi-task learning manner. The resultant representations can capture information more correlated with phonetic structures and improve the generalization across languages and domains. We evaluate the effectiveness of UniSpeech for cross-lingual representation learning on public CommonVoice corpus. The results show that UniSpeech outperforms self-supervised pretraining and supervised transfer learning for speech recognition by a maximum of 13.4% and 17.8% relative phone error rate reductions respectively (averaged over all testing languages). The transferability of UniSpeech is also demonstrated on a domain-shift speech recognition task, i.e., a relative word error rate reduction of 6% against the previous approach.*

This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech).

## Usage tips

- UniSpeech is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction.
- The UniSpeech model can be fine-tuned using connectionist temporal classification (CTC), so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`] (a short decoding sketch is given at the end of this page).

## Resources

- [Audio classification task guide](../tasks/audio_classification)
- [Automatic speech recognition task guide](../tasks/asr)

## UniSpeechConfig

[[autodoc]] UniSpeechConfig

## UniSpeech specific outputs

[[autodoc]] models.unispeech.modeling_unispeech.UniSpeechForPreTrainingOutput

## UniSpeechModel

[[autodoc]] UniSpeechModel
    - forward

## UniSpeechForCTC

[[autodoc]] UniSpeechForCTC
    - forward

## UniSpeechForSequenceClassification

[[autodoc]] UniSpeechForSequenceClassification
    - forward

## UniSpeechForPreTraining

[[autodoc]] UniSpeechForPreTraining
    - forward
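To illustrate the CTC decoding flow mentioned in the usage tips, here is a minimal ASR sketch. The checkpoint identifier below is a placeholder — substitute any UniSpeech checkpoint that has been fine-tuned for CTC together with its processor:

```python
import torch
from datasets import load_dataset
from transformers import UniSpeechForCTC, Wav2Vec2Processor

# Placeholder: replace with a CTC fine-tuned UniSpeech checkpoint
checkpoint = "path/to/unispeech-ctc-checkpoint"
processor = Wav2Vec2Processor.from_pretrained(checkpoint)
model = UniSpeechForCTC.from_pretrained(checkpoint)

ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
inputs = processor(
    ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
)

with torch.no_grad():
    logits = model(**inputs).logits

# Greedy CTC decoding: take the argmax per frame, then collapse repeats and blanks
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))
```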
transformers/docs/source/en/model_doc/unispeech.md/0
{ "file_path": "transformers/docs/source/en/model_doc/unispeech.md", "repo_id": "transformers", "token_count": 853 }
24
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# XLM

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=xlm">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/xlm-mlm-en-2048">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

## Overview

The XLM model was proposed in [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. It's a transformer pretrained using one of the following objectives:

- a causal language modeling (CLM) objective (next token prediction),
- a masked language modeling (MLM) objective (BERT-like), or
- a translation language modeling (TLM) objective (an extension of BERT's MLM to multiple language inputs)

The abstract from the paper is the following:

*Recent studies have demonstrated the efficiency of generative pretraining for English natural language understanding. In this work, we extend this approach to multiple languages and show the effectiveness of cross-lingual pretraining. We propose two methods to learn cross-lingual language models (XLMs): one unsupervised that only relies on monolingual data, and one supervised that leverages parallel data with a new cross-lingual language model objective. We obtain state-of-the-art results on cross-lingual classification, unsupervised and supervised machine translation. On XNLI, our approach pushes the state of the art by an absolute gain of 4.9% accuracy. On unsupervised machine translation, we obtain 34.3 BLEU on WMT'16 German-English, improving the previous state of the art by more than 9 BLEU. On supervised machine translation, we obtain a new state of the art of 38.5 BLEU on WMT'16 Romanian-English, outperforming the previous best approach by more than 4 BLEU. Our code and pretrained models will be made publicly available.*

This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/facebookresearch/XLM/).

## Usage tips

- XLM has many different checkpoints, which were trained using different objectives: CLM, MLM or TLM. Make sure to select the correct objective for your task (e.g. MLM checkpoints are not suitable for generation).
- XLM has multilingual checkpoints which leverage a specific `lang` parameter. Check out the [multi-lingual](../multilingual) page for more information, and see the sketch at the end of this page for a minimal example.
- XLM is a transformer model trained on several languages. There are three different types of training for this model, and the library provides checkpoints for all of them:

    * Causal language modeling (CLM), which is the traditional autoregressive training (so this model could be in the previous section as well).
      One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens that may span several documents in one of those languages.
    * Masked language modeling (MLM), which is like RoBERTa. One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens that may span several documents in one of those languages, with dynamic masking of the tokens.
    * A combination of MLM and translation language modeling (TLM). This consists of concatenating a sentence in two different languages, with random masking. To predict one of the masked tokens, the model can use both the surrounding context in language 1 and the context given by language 2.

## Resources

- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)

## XLMConfig

[[autodoc]] XLMConfig

## XLMTokenizer

[[autodoc]] XLMTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## XLM specific outputs

[[autodoc]] models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput

<frameworkcontent>
<pt>

## XLMModel

[[autodoc]] XLMModel
    - forward

## XLMWithLMHeadModel

[[autodoc]] XLMWithLMHeadModel
    - forward

## XLMForSequenceClassification

[[autodoc]] XLMForSequenceClassification
    - forward

## XLMForMultipleChoice

[[autodoc]] XLMForMultipleChoice
    - forward

## XLMForTokenClassification

[[autodoc]] XLMForTokenClassification
    - forward

## XLMForQuestionAnsweringSimple

[[autodoc]] XLMForQuestionAnsweringSimple
    - forward

## XLMForQuestionAnswering

[[autodoc]] XLMForQuestionAnswering
    - forward

</pt>
<tf>

## TFXLMModel

[[autodoc]] TFXLMModel
    - call

## TFXLMWithLMHeadModel

[[autodoc]] TFXLMWithLMHeadModel
    - call

## TFXLMForSequenceClassification

[[autodoc]] TFXLMForSequenceClassification
    - call

## TFXLMForMultipleChoice

[[autodoc]] TFXLMForMultipleChoice
    - call

## TFXLMForTokenClassification

[[autodoc]] TFXLMForTokenClassification
    - call

## TFXLMForQuestionAnsweringSimple

[[autodoc]] TFXLMForQuestionAnsweringSimple
    - call

</tf>
</frameworkcontent>
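As referenced in the usage tips, here is a minimal sketch of passing language embeddings through the `langs` argument. The `FacebookAI/xlm-mlm-enfr-1024` checkpoint name and the `"en"` language code are assumptions to verify on the Hub before running.

```python
import torch
from transformers import XLMTokenizer, XLMWithLMHeadModel

# assumed checkpoint: an MLM model pretrained on English and French
tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-enfr-1024")
model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-mlm-enfr-1024")

input_ids = torch.tensor([tokenizer.encode("Wikipedia was used to")])

# map the language code to its id and build a `langs` tensor of the same shape as the input
language_id = tokenizer.lang2id["en"]
langs = torch.full_like(input_ids, language_id)

outputs = model(input_ids, langs=langs)
print(outputs.logits.shape)  # (batch_size, sequence_length, vocab_size)
```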
transformers/docs/source/en/model_doc/xlm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm.md", "repo_id": "transformers", "token_count": 1744 }
25
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Pipelines for inference

The [`pipeline`] makes it simple to use any model from the [Hub](https://huggingface.co/models) for inference on any language, computer vision, speech, and multimodal tasks. Even if you don't have experience with a specific modality or aren't familiar with the underlying code behind the models, you can still use them for inference with the [`pipeline`]! This tutorial will teach you to:

* Use a [`pipeline`] for inference.
* Use a specific tokenizer or model.
* Use a [`pipeline`] for audio, vision, and multimodal tasks.

<Tip>

Take a look at the [`pipeline`] documentation for a complete list of supported tasks and available parameters.

</Tip>

## Pipeline usage

While each task has an associated [`pipeline`], it is simpler to use the general [`pipeline`] abstraction which contains all the task-specific pipelines. The [`pipeline`] automatically loads a default model and a preprocessing class capable of inference for your task. Let's take the example of using the [`pipeline`] for automatic speech recognition (ASR), or speech-to-text.

1. Start by creating a [`pipeline`] and specify the inference task:

```py
>>> from transformers import pipeline

>>> transcriber = pipeline(task="automatic-speech-recognition")
```

2. Pass your input to the [`pipeline`]. In the case of speech recognition, this is an audio input file:

```py
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'}
```

Not the result you had in mind? Check out some of the [most downloaded automatic speech recognition models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) on the Hub to see if you can get a better transcription.

Let's try the [Whisper large-v2](https://huggingface.co/openai/whisper-large-v2) model from OpenAI. Whisper was released 2 years later than Wav2Vec2, and was trained on close to 10x more data. As such, it beats Wav2Vec2 on most downstream benchmarks. It also has the added benefit of predicting punctuation and casing, neither of which is possible with Wav2Vec2.

Let's give it a try here to see how it performs. Set `torch_dtype="auto"` to automatically load the most memory-efficient data type the weights are stored in.

```py
>>> transcriber = pipeline(model="openai/whisper-large-v2", torch_dtype="auto")
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```

Now this result looks more accurate!
For a deep-dive comparison on Wav2Vec2 vs Whisper, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/asr_models).

We really encourage you to check out the Hub for models in different languages, models specialized in your field, and more. You can check out and compare model results directly from your browser on the Hub to see if a model fits or handles corner cases better than other ones. And if you don't find a model for your use case, you can always start [training](training) your own!

If you have several inputs, you can pass your input as a list:

```py
transcriber(
    [
        "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac",
        "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
    ]
)
```

Pipelines are great for experimentation as switching from one model to another is trivial; however, there are some ways to optimize them for larger workloads than experimentation. See the following guides that dive into iterating over whole datasets or using pipelines in a webserver:

* [Using pipelines on a dataset](#using-pipelines-on-a-dataset)
* [Using pipelines for a webserver](./pipeline_webserver)

## Parameters

[`pipeline`] supports many parameters; some are task specific, and some are general to all pipelines. In general, you can specify parameters anywhere you want:

```py
transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1)

out = transcriber(...)  # This will use `my_parameter=1`.
out = transcriber(..., my_parameter=2)  # This will override and use `my_parameter=2`.
out = transcriber(...)  # This will go back to using `my_parameter=1`.
```

Let's check out 3 important ones:

### Device

If you use `device=n`, the pipeline automatically puts the model on the specified device. This will work regardless of whether you are using PyTorch or TensorFlow.

```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0)
```

If the model is too large for a single GPU and you are using PyTorch, you can set `torch_dtype='float16'` to enable FP16 precision inference. Usually this would not cause significant performance drops, but make sure you evaluate it on your models!

Alternatively, you can set `device_map="auto"` to automatically determine how to load and store the model weights. Using the `device_map` argument requires the 🤗 [Accelerate](https://huggingface.co/docs/accelerate) package:

```bash
pip install --upgrade accelerate
```

The following code automatically loads and stores model weights across devices:

```py
transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto")
```

Note that if `device_map="auto"` is passed, there is no need to add a `device` argument when instantiating your `pipeline`, as you may encounter some unexpected behavior!

### Batch size

By default, pipelines will not batch inference for reasons explained in detail [here](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). The reason is that batching is not necessarily faster, and can actually be quite a bit slower in some cases.
But if it works in your use case, you can use:

```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2)
audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)]
texts = transcriber(audio_filenames)
```

This runs the pipeline on the 4 provided audio files, but it will pass them in batches of 2 to the model (which is on a GPU, where batching is more likely to help) without requiring any further code from you. The output should always match what you would have received without batching. It is only meant as a way to help you get more speed out of a pipeline.

Pipelines can also alleviate some of the complexities of batching because, for some pipelines, a single item (like a long audio file) needs to be chunked into multiple parts to be processed by a model. The pipeline performs this [*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching) for you.

### Task specific parameters

All tasks provide task-specific parameters which allow for additional flexibility and options to help you get your job done. For instance, the [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] method has a `return_timestamps` parameter which sounds promising for subtitling videos:

```py
>>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True)
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]}
```

As you can see, the model inferred the text and also output **when** the various sentences were pronounced.

There are many parameters available for each task, so check out each task's API reference to see what you can tinker with! For instance, the [`~transformers.AutomaticSpeechRecognitionPipeline`] has a `chunk_length_s` parameter which is helpful for working on really long audio files (for example, subtitling entire movies or hour-long videos) that a model typically cannot handle on its own:

```python
>>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30)
>>> transcriber("https://huggingface.co/datasets/reach-vb/random-audios/resolve/main/ted_60.wav")
{'text': " So in college, I was a government major, which means I had to write a lot of papers. Now, when a normal student writes a paper, they might spread the work out a little like this. So, you know. You get started maybe a little slowly, but you get enough done in the first week that with some heavier days later on, everything gets done and things stay civil. And I would want to do that like that. That would be the plan. I would have it all ready to go, but then actually the paper would come along, and then I would kind of do this. And that would happen every single paper. But then came my 90-page senior thesis, a paper you're supposed to spend a year on. I knew for a paper like that, my normal workflow was not an option, it was way too big a project. So I planned things out and I decided I kind of had to go something like this. This is how the year would go. So I'd start off light and I'd bump it up"}
```
If you can't find a parameter that would really help you out, feel free to [request it](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)!

## Using pipelines on a dataset

The pipeline can also run inference on a large dataset. The easiest way we recommend doing this is by using an iterator:

```py
def data():
    for i in range(1000):
        yield f"My example {i}"


pipe = pipeline(model="openai-community/gpt2", device=0)
generated_characters = 0
for out in pipe(data()):
    generated_characters += len(out[0]["generated_text"])
```

The iterator `data()` yields each input, and the pipeline automatically recognizes the input is iterable and will start fetching the data while it continues to process it on the GPU (this uses [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) under the hood). This is important because you don't have to allocate memory for the whole dataset and you can feed the GPU as fast as possible.

Since batching could speed things up, it may be useful to try tuning the `batch_size` parameter here.

The simplest way to iterate over a dataset is to just load one from 🤗 [Datasets](https://github.com/huggingface/datasets/):

```py
# KeyDataset is a util that will just output the item we're interested in.
from transformers.pipelines.pt_utils import KeyDataset
from datasets import load_dataset

pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]")

for out in pipe(KeyDataset(dataset, "audio")):
    print(out)
```

## Using pipelines for a webserver

<Tip>
Creating an inference engine is a complex topic which deserves its own page.
</Tip>

[Link](./pipeline_webserver)

## Vision pipeline

Using a [`pipeline`] for vision tasks is practically identical.

Specify your task and pass your image to the classifier. The image can be a link, a local path or a base64-encoded image. For example, what species of cat is shown below?

![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg)

```py
>>> from transformers import pipeline

>>> vision_classifier = pipeline(model="google/vit-base-patch16-224")
>>> preds = vision_classifier(
...     images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}]
```

## Text pipeline

Using a [`pipeline`] for NLP tasks is practically identical.

```py
>>> from transformers import pipeline

>>> # This model is a `zero-shot-classification` model.
>>> # It will classify text, except you are free to choose any label you might imagine
>>> classifier = pipeline(model="facebook/bart-large-mnli")
>>> classifier(
...     "I have a problem with my iphone that needs to be resolved asap!!",
...     candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
```

## Multimodal pipeline

The [`pipeline`] supports more than one modality. For example, a visual question answering (VQA) task combines text and image. Feel free to use any image link you like and a question you want to ask about the image. The image can be a URL or a local path to the image.

For example, if you use this [invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png):

```py
>>> from transformers import pipeline

>>> vqa = pipeline(model="impira/layoutlm-document-qa")
>>> output = vqa(
...     image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
...     question="What is the invoice number?",
... )
>>> output[0]["score"] = round(output[0]["score"], 3)
>>> output
[{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}]
```

<Tip>

To run the example above you need to have [`pytesseract`](https://pypi.org/project/pytesseract/) installed in addition to 🤗 Transformers:

```bash
sudo apt install -y tesseract-ocr
pip install pytesseract
```

</Tip>

## Using `pipeline` on large models with 🤗 `accelerate`

You can easily run `pipeline` on large models using 🤗 `accelerate`! First, make sure you have installed `accelerate` with `pip install accelerate`.

Then load your model using `device_map="auto"`! We will use `facebook/opt-1.3b` for our example.

```py
# pip install accelerate
import torch
from transformers import pipeline

pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto")
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```

You can also pass 8-bit loaded models if you install `bitsandbytes` and add the argument `load_in_8bit=True`:

```py
# pip install accelerate bitsandbytes
import torch
from transformers import pipeline

pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True})
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```

Note that you can replace the checkpoint with any Hugging Face model that supports large model loading, such as BLOOM.

## Creating web demos from pipelines with `gradio`

Pipelines are automatically supported in [Gradio](https://github.com/gradio-app/gradio/), a library that makes creating beautiful and user-friendly machine learning apps on the web a breeze. First, make sure you have Gradio installed:

```
pip install gradio
```

Then, you can create a web demo around an image classification pipeline (or any other pipeline) in a single line of code by calling Gradio's [`Interface.from_pipeline`](https://www.gradio.app/docs/interface#interface-from-pipeline) function to launch the pipeline. This creates an intuitive drag-and-drop interface in your browser:

```py
from transformers import pipeline
import gradio as gr

pipe = pipeline("image-classification", model="google/vit-base-patch16-224")

gr.Interface.from_pipeline(pipe).launch()
```

![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/panda-classification.png)

By default, the web demo runs on a local server. If you'd like to share it with others, you can generate a temporary public link by setting `share=True` in `launch()`, as in the sketch below. You can also host your demo on [Hugging Face Spaces](https://huggingface.co/spaces) for a permanent link.
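For instance, a minimal variation of the demo above that produces a shareable link:

```py
from transformers import pipeline
import gradio as gr

pipe = pipeline("image-classification", model="google/vit-base-patch16-224")

# share=True generates a temporary public URL that others can open in their browser
gr.Interface.from_pipeline(pipe).launch(share=True)
```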
transformers/docs/source/en/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/en/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 5090 }
26
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Quantization

Quantization techniques focus on representing data with less information while also trying not to lose too much accuracy. This often means converting a data type to represent the same information with fewer bits. For example, if your model weights are stored as 32-bit floating points and they're quantized to 16-bit floating points, this halves the model size, which makes it easier to store and reduces memory usage. Lower precision can also speed up inference because it takes less time to perform calculations with fewer bits.

<Tip>

Interested in adding a new quantization method to Transformers? Read the [HfQuantizer](./contribute) guide to learn how!

</Tip>

<Tip>

If you are new to the quantization field, we recommend checking out these beginner-friendly courses about quantization, made in collaboration with DeepLearning.AI:

* [Quantization Fundamentals with Hugging Face](https://www.deeplearning.ai/short-courses/quantization-fundamentals-with-hugging-face/)
* [Quantization in Depth](https://www.deeplearning.ai/short-courses/quantization-in-depth/)

</Tip>

## When to use what?

The community has developed many quantization methods for various use cases. With Transformers, you can run any of these integrated methods depending on your use case because each method has its own pros and cons.

For example, some quantization methods require calibrating the model with a dataset for more accurate and "extreme" compression (up to 1-2 bits quantization), while other methods work out of the box with on-the-fly quantization.

Another parameter to consider is compatibility with your target device. Do you want to quantize on a CPU, GPU, or Apple silicon?

In short, supporting a wide range of quantization methods allows you to pick the best quantization method for your specific use case.

Use the table below to help you decide which quantization method to use.

| Quantization Method | On the fly quantization | CPU | CUDA GPU | ROCm GPU | Metal (Apple Silicon) | Intel GPU | Torch compile() | Bits | PEFT Fine Tuning | Serializable with 🤗Transformers | 🤗Transformers Support | Link to library |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| [AQLM](./aqlm.md) | 🔴 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 1/2 | 🟢 | 🟢 | 🟢 | https://github.com/Vahe1994/AQLM |
| [AWQ](./awq.md) | 🔴 | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | ? | 4 | 🟢 | 🟢 | 🟢 | https://github.com/casper-hansen/AutoAWQ |
| [bitsandbytes](./bitsandbytes.md) | 🟢 | 🟡 <sub>1</sub> | 🟢 | 🟡 <sub>1</sub> | 🔴 <sub>2</sub> | 🟡 <sub>1</sub> | 🔴 <sub>1</sub> | 4/8 | 🟢 | 🟢 | 🟢 | https://github.com/bitsandbytes-foundation/bitsandbytes |
| [compressed-tensors](./compressed_tensors.md) | 🔴 | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 1/8 | 🟢 | 🟢 | 🟢 | https://github.com/neuralmagic/compressed-tensors |
| [EETQ](./eetq.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | ? | 8 | 🟢 | 🟢 | 🟢 | https://github.com/NetEase-FuXi/EETQ |
| [GGUF / GGML (llama.cpp)](../gguf.md) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 1/8 | 🔴 | [See Notes](../gguf.md) | [See Notes](../gguf.md) | https://github.com/ggerganov/llama.cpp |
| [GPTQModel](./gptq.md) | 🔴 | 🟢 <sub>3</sub> | 🟢 | 🟢 | 🟢 | 🟢 <sub>4</sub> | 🔴 | 2/3/4/8 | 🟢 | 🟢 | 🟢 | https://github.com/ModelCloud/GPTQModel |
| [AutoGPTQ](./gptq.md) | 🔴 | 🔴 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 2/3/4/8 | 🟢 | 🟢 | 🟢 | https://github.com/AutoGPTQ/AutoGPTQ |
| [HIGGS](./higgs.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 2/4 | 🔴 | 🟢 | 🟢 | https://github.com/HanGuo97/flute |
| [HQQ](./hqq.md) | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 1/8 | 🟢 | 🔴 | 🟢 | https://github.com/mobiusml/hqq/ |
| [optimum-quanto](./quanto.md) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🔴 | 🟢 | 2/4/8 | 🔴 | 🔴 | 🟢 | https://github.com/huggingface/optimum-quanto |
| [FBGEMM_FP8](./fbgemm_fp8.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🔴 | 8 | 🔴 | 🟢 | 🟢 | https://github.com/pytorch/FBGEMM |
| [torchao](./torchao.md) | 🟢 | | 🟢 | 🔴 | 🟡 <sub>5</sub> | 🔴 | | 4/8 | | 🟢🔴 | 🟢 | https://github.com/pytorch/ao |
| [VPTQ](./vptq.md) | 🔴 | 🔴 | 🟢 | 🟡 | 🔴 | 🔴 | 🟢 | 1/8 | 🔴 | 🟢 | 🟢 | https://github.com/microsoft/VPTQ |

<Tip>

**1:** bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend). Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.

</Tip>

<Tip>

**2:** bitsandbytes is seeking contributors to help develop and lead the Apple Silicon backend. Interested? Contact them directly via their repo. Stipends may be available through sponsorships.

</Tip>

<Tip>

**3:** GPTQModel[CPU] supports 4-bit via IPEX on Intel/AMD and the full bit range via Torch on Intel/AMD/Apple Silicon.

</Tip>

<Tip>

**4:** GPTQModel[Intel GPU] via IPEX only supports 4-bit for Intel Datacenter Max/Arc GPUs.

</Tip>

<Tip>

**5:** torchao only supports int4 weights on Metal (Apple Silicon).

</Tip>
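To make "on-the-fly quantization" from the table concrete, here is a minimal sketch of loading a model in 8-bit with bitsandbytes. The `facebook/opt-350m` checkpoint is just a small placeholder, and the call assumes a CUDA GPU (or another mature bitsandbytes backend) is available.

```py
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# quantize the weights to 8-bit as they are loaded; no calibration dataset required
quantization_config = BitsAndBytesConfig(load_in_8bit=True)

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # placeholder checkpoint; swap in any causal LM
    device_map="auto",
    quantization_config=quantization_config,
)
print(model.get_memory_footprint())  # roughly a quarter of the fp32 footprint
```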
transformers/docs/source/en/quantization/overview.md/0
{ "file_path": "transformers/docs/source/en/quantization/overview.md", "repo_id": "transformers", "token_count": 4883 }
27
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Image-text-to-text

[[open-in-colab]]

Image-text-to-text models, also known as vision language models (VLMs), are language models that take an image input. These models can tackle various tasks, from visual question answering to image segmentation. This task shares many similarities with image-to-text, including some overlapping use cases like image captioning. Image-to-text models only take image inputs and often accomplish a specific task, whereas VLMs take open-ended text and image inputs and are more generalist models.

In this guide, we provide a brief overview of VLMs and show how to use them with Transformers for inference.

To begin with, there are multiple types of VLMs:

- base models used for fine-tuning
- chat fine-tuned models for conversation
- instruction fine-tuned models

This guide focuses on inference with an instruction-tuned model.

Let's begin by installing the dependencies.

```bash
pip install -q transformers accelerate flash_attn
```

Let's initialize the model and the processor.

```python
from transformers import AutoProcessor, AutoModelForImageTextToText
import torch

device = torch.device("cuda")
model = AutoModelForImageTextToText.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
).to(device)

processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
```

This model has a [chat template](./chat_templating) that helps users parse chat outputs. Moreover, the model can also accept multiple images as input in a single conversation or message. We will now prepare the inputs.

The image inputs look like the following.

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png" alt="Two cats sitting on a net"/>
</div>

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg" alt="A bee on a pink flower"/>
</div>

```python
from PIL import Image
import requests

img_urls = ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png",
            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"]
images = [Image.open(requests.get(img_urls[0], stream=True).raw),
          Image.open(requests.get(img_urls[1], stream=True).raw)]
```

Below is an example of the chat template. We can feed conversation turns and the last message as an input by appending it at the end of the template.
```python
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "What do we see in this image?"},
        ]
    },
    {
        "role": "assistant",
        "content": [
            {"type": "text", "text": "In this image we can see two cats on the nets."},
        ]
    },
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "And how about this image?"},
        ]
    },
]
```

We will now call the processor's [`~ProcessorMixin.apply_chat_template`] method to format the conversation, and then pass the resulting prompt along with the image inputs to the processor.

```python
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=[images[0], images[1]], return_tensors="pt").to(device)
```

We can now pass the preprocessed inputs to the model.

```python
with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=500)
generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)

print(generated_texts)
## ['User: What do we see in this image? \nAssistant: In this image we can see two cats on the nets. \nUser: And how about this image? \nAssistant: In this image we can see flowers, plants and insect.']
```

## Pipeline

The fastest way to get started is to use the [`Pipeline`] API. Specify the `"image-text-to-text"` task and the model you want to use.

```python
from transformers import pipeline

pipe = pipeline("image-text-to-text", model="llava-hf/llava-interleave-qwen-0.5b-hf")
```

The example below uses chat templates to format the text inputs.

```python
messages = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg",
            },
            {"type": "text", "text": "Describe this image."},
        ],
    },
    {
        "role": "assistant",
        "content": [
            {"type": "text", "text": "There's a pink flower"},
        ],
    },
]
```

Pass the chat template formatted text and image to [`Pipeline`] and set `return_full_text=False` to remove the input from the generated output.

```python
outputs = pipe(text=messages, max_new_tokens=20, return_full_text=False)
outputs[0]["generated_text"]
# with a yellow center in the foreground. The flower is surrounded by red and white flowers with green stems
```

## Streaming

We can use [text streaming](./generation_strategies#streaming) for a better generation experience. Transformers supports streaming with the [`TextStreamer`] or [`TextIteratorStreamer`] classes. We will use the [`TextIteratorStreamer`] with IDEFICS-8B.

Assume we have an application that keeps chat history and takes in the new user input. We will preprocess the inputs as usual and initialize [`TextIteratorStreamer`] to handle the generation in a separate thread. This allows you to stream the generated text tokens in real-time. Any generation arguments can be passed to [`TextIteratorStreamer`].
```python
import time
from threading import Thread

from transformers import TextIteratorStreamer


def model_inference(
    user_prompt,
    chat_history,
    max_new_tokens,
    images
):
    user_prompt = {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": user_prompt},
        ]
    }
    chat_history.append(user_prompt)
    streamer = TextIteratorStreamer(
        processor.tokenizer,
        skip_prompt=True,
        timeout=5.0,
    )

    generation_args = {
        "max_new_tokens": max_new_tokens,
        "streamer": streamer,
        "do_sample": False
    }

    # add_generation_prompt=True makes the model generate the bot response
    prompt = processor.apply_chat_template(chat_history, add_generation_prompt=True)
    inputs = processor(
        text=prompt,
        images=images,
        return_tensors="pt",
    ).to(device)
    generation_args.update(inputs)

    # run generation in a background thread so tokens can be consumed as they arrive
    thread = Thread(
        target=model.generate,
        kwargs=generation_args,
    )
    thread.start()

    acc_text = ""
    for text_token in streamer:
        time.sleep(0.04)
        acc_text += text_token
        if acc_text.endswith("<end_of_utterance>"):
            # strip the trailing "<end_of_utterance>" marker (18 characters)
            acc_text = acc_text[:-18]
        yield acc_text

    thread.join()
```

Now let's call the `model_inference` function we created and stream the values.

```python
generator = model_inference(
    user_prompt="And what is in this image?",
    chat_history=messages[:2],
    max_new_tokens=100,
    images=images
)

for value in generator:
    print(value)

# In
# In this
# In this image ...
```

## Fit models in smaller hardware

VLMs are often large and need to be optimized to fit on smaller hardware. Transformers supports many model quantization libraries, and here we will only show int8 quantization with [Quanto](./quantization/quanto#quanto). int8 quantization offers memory improvements of up to 75 percent (if all weights are quantized). However, it is no free lunch: since 8-bit is not a CUDA-native precision, the weights are quantized back and forth on the fly, which adds latency.

First, install the dependencies.

```bash
pip install -U quanto bitsandbytes
```

To quantize a model during loading, we need to first create [`QuantoConfig`]. Then load the model as usual, but pass `quantization_config` during model initialization.

```python
from transformers import AutoModelForImageTextToText, QuantoConfig

model_id = "HuggingFaceM4/idefics2-8b"
quantization_config = QuantoConfig(weights="int8")
quantized_model = AutoModelForImageTextToText.from_pretrained(
    model_id, device_map="cuda", quantization_config=quantization_config
)
```

And that's it, we can use the model the same way with no changes.

## Further Reading

Here are some more resources for the image-text-to-text task.

- [Image-text-to-text task page](https://huggingface.co/tasks/image-text-to-text) covers model types, use cases, datasets, and more.
- [Vision Language Models Explained](https://huggingface.co/blog/vlms) is a blog post that covers everything about vision language models and supervised fine-tuning using [TRL](https://huggingface.co/docs/trl/en/index).
transformers/docs/source/en/tasks/image_text_to_text.md/0
{ "file_path": "transformers/docs/source/en/tasks/image_text_to_text.md", "repo_id": "transformers", "token_count": 3285 }
28
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Token classification

[[open-in-colab]]

<Youtube id="wVHdVlPScxA"/>

Token classification assigns a label to individual tokens in a sentence. One of the most common token classification tasks is Named Entity Recognition (NER). NER attempts to find a label for each entity in a sentence, such as a person, location, or organization.

This guide will show you how to:

1. Finetune [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) on the [WNUT 17](https://huggingface.co/datasets/wnut_17) dataset to detect new entities.
2. Use your finetuned model for inference.

<Tip>

To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/token-classification).

</Tip>

Before you begin, make sure you have all the necessary libraries installed:

```bash
pip install transformers datasets evaluate seqeval
```

We encourage you to log in to your Hugging Face account so you can upload and share your model with the community. When prompted, enter your token to log in:

```py
>>> from huggingface_hub import notebook_login

>>> notebook_login()
```

## Load WNUT 17 dataset

Start by loading the WNUT 17 dataset from the 🤗 Datasets library:

```py
>>> from datasets import load_dataset

>>> wnut = load_dataset("wnut_17")
```

Then take a look at an example:

```py
>>> wnut["train"][0]
{'id': '0',
 'ner_tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
 'tokens': ['@paulwalk', 'It', "'s", 'the', 'view', 'from', 'where', 'I', "'m", 'living', 'for', 'two', 'weeks', '.', 'Empire', 'State', 'Building', '=', 'ESB', '.', 'Pretty', 'bad', 'storm', 'here', 'last', 'evening', '.']
}
```

Each number in `ner_tags` represents an entity. Convert the numbers to their label names to find out what the entities are:

```py
>>> label_list = wnut["train"].features[f"ner_tags"].feature.names
>>> label_list
[
    "O",
    "B-corporation",
    "I-corporation",
    "B-creative-work",
    "I-creative-work",
    "B-group",
    "I-group",
    "B-location",
    "I-location",
    "B-person",
    "I-person",
    "B-product",
    "I-product",
]
```

The letter that prefixes each `ner_tag` indicates the token position of the entity:

- `B-` indicates the beginning of an entity.
- `I-` indicates a token is contained inside the same entity (for example, the `State` token is a part of an entity like `Empire State Building`).
- `O` indicates the token doesn't correspond to any entity.

## Preprocess

<Youtube id="iY2AZYdZAr0"/>

The next step is to load a DistilBERT tokenizer to preprocess the `tokens` field:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```

As you saw in the example `tokens` field above, it looks like the input has already been tokenized.
But the input actually hasn't been tokenized yet, and you'll need to set `is_split_into_words=True` to tokenize the words into subwords. For example:

```py
>>> example = wnut["train"][0]
>>> tokenized_input = tokenizer(example["tokens"], is_split_into_words=True)
>>> tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
>>> tokens
['[CLS]', '@', 'paul', '##walk', 'it', "'", 's', 'the', 'view', 'from', 'where', 'i', "'", 'm', 'living', 'for', 'two', 'weeks', '.', 'empire', 'state', 'building', '=', 'es', '##b', '.', 'pretty', 'bad', 'storm', 'here', 'last', 'evening', '.', '[SEP]']
```

However, this adds some special tokens `[CLS]` and `[SEP]`, and the subword tokenization creates a mismatch between the input and labels. A single word corresponding to a single label may now be split into two subwords. You'll need to realign the tokens and labels by:

1. Mapping all tokens to their corresponding word with the [`word_ids`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.BatchEncoding.word_ids) method.
2. Assigning the label `-100` to the special tokens `[CLS]` and `[SEP]` so they're ignored by the PyTorch loss function (see [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)).
3. Only labeling the first token of a given word. Assign `-100` to other subtokens from the same word.

Here is how you can create a function to realign the tokens and labels, and truncate sequences to be no longer than DistilBERT's maximum input length:

```py
>>> def tokenize_and_align_labels(examples):
...     tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)

...     labels = []
...     for i, label in enumerate(examples[f"ner_tags"]):
...         word_ids = tokenized_inputs.word_ids(batch_index=i)  # Map tokens to their respective word.
...         previous_word_idx = None
...         label_ids = []
...         for word_idx in word_ids:  # Set the special tokens to -100.
...             if word_idx is None:
...                 label_ids.append(-100)
...             elif word_idx != previous_word_idx:  # Only label the first token of a given word.
...                 label_ids.append(label[word_idx])
...             else:
...                 label_ids.append(-100)
...             previous_word_idx = word_idx
...         labels.append(label_ids)

...     tokenized_inputs["labels"] = labels
...     return tokenized_inputs
```

To apply the preprocessing function over the entire dataset, use the 🤗 Datasets [`~datasets.Dataset.map`] function. You can speed up the `map` function by setting `batched=True` to process multiple elements of the dataset at once:

```py
>>> tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)
```

Now create a batch of examples using [`DataCollatorWithPadding`]. It's more efficient to *dynamically pad* the sentences to the longest length in a batch during collation, instead of padding the whole dataset to the maximum length.

<frameworkcontent>
<pt>

```py
>>> from transformers import DataCollatorForTokenClassification

>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
```

</pt>
<tf>

```py
>>> from transformers import DataCollatorForTokenClassification

>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")
```

</tf>
</frameworkcontent>

## Evaluate

Including a metric during training is often helpful for evaluating your model's performance. You can quickly load an evaluation method with the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library.
For this task, load the [seqeval](https://huggingface.co/spaces/evaluate-metric/seqeval) framework (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric). Seqeval actually produces several scores: precision, recall, F1, and accuracy.

```py
>>> import evaluate

>>> seqeval = evaluate.load("seqeval")
```

Get the NER labels first, and then create a function that passes your true predictions and true labels to [`~evaluate.EvaluationModule.compute`] to calculate the scores:

```py
>>> import numpy as np

>>> labels = [label_list[i] for i in example[f"ner_tags"]]

>>> def compute_metrics(p):
...     predictions, labels = p
...     predictions = np.argmax(predictions, axis=2)

...     true_predictions = [
...         [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
...         for prediction, label in zip(predictions, labels)
...     ]
...     true_labels = [
...         [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
...         for prediction, label in zip(predictions, labels)
...     ]

...     results = seqeval.compute(predictions=true_predictions, references=true_labels)
...     return {
...         "precision": results["overall_precision"],
...         "recall": results["overall_recall"],
...         "f1": results["overall_f1"],
...         "accuracy": results["overall_accuracy"],
...     }
```

Your `compute_metrics` function is ready to go now, and you'll return to it when you set up your training.

## Train

Before you start training your model, create a map of the expected ids to their labels with `id2label` and `label2id`:

```py
>>> id2label = {
...     0: "O",
...     1: "B-corporation",
...     2: "I-corporation",
...     3: "B-creative-work",
...     4: "I-creative-work",
...     5: "B-group",
...     6: "I-group",
...     7: "B-location",
...     8: "I-location",
...     9: "B-person",
...     10: "I-person",
...     11: "B-product",
...     12: "I-product",
... }
>>> label2id = {
...     "O": 0,
...     "B-corporation": 1,
...     "I-corporation": 2,
...     "B-creative-work": 3,
...     "I-creative-work": 4,
...     "B-group": 5,
...     "I-group": 6,
...     "B-location": 7,
...     "I-location": 8,
...     "B-person": 9,
...     "I-person": 10,
...     "B-product": 11,
...     "I-product": 12,
... }
```

<frameworkcontent>
<pt>
<Tip>

If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)!

</Tip>

You're ready to start training your model now! Load DistilBERT with [`AutoModelForTokenClassification`] along with the number of expected labels, and the label mappings:

```py
>>> from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer

>>> model = AutoModelForTokenClassification.from_pretrained(
...     "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
... )
```

At this point, only three steps remain:

1. Define your training hyperparameters in [`TrainingArguments`]. The only required parameter is `output_dir`, which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model). At the end of each epoch, the [`Trainer`] will evaluate the seqeval scores and save the training checkpoint.
2. Pass the training arguments to [`Trainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.
3. Call [`~Trainer.train`] to finetune your model.

```py
>>> training_args = TrainingArguments(
...     output_dir="my_awesome_wnut_model",
...     learning_rate=2e-5,
...     per_device_train_batch_size=16,
...     per_device_eval_batch_size=16,
...     num_train_epochs=2,
...     weight_decay=0.01,
...     eval_strategy="epoch",
...     save_strategy="epoch",
...     load_best_model_at_end=True,
...     push_to_hub=True,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=tokenized_wnut["train"],
...     eval_dataset=tokenized_wnut["test"],
...     processing_class=tokenizer,
...     data_collator=data_collator,
...     compute_metrics=compute_metrics,
... )

>>> trainer.train()
```

Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model:

```py
>>> trainer.push_to_hub()
```

</pt>
<tf>
<Tip>

If you aren't familiar with finetuning a model with Keras, take a look at the basic tutorial [here](../training#train-a-tensorflow-model-with-keras)!

</Tip>

To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters:

```py
>>> from transformers import create_optimizer

>>> batch_size = 16
>>> num_train_epochs = 3
>>> num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs
>>> optimizer, lr_schedule = create_optimizer(
...     init_lr=2e-5,
...     num_train_steps=num_train_steps,
...     weight_decay_rate=0.01,
...     num_warmup_steps=0,
... )
```

Then you can load DistilBERT with [`TFAutoModelForTokenClassification`] along with the number of expected labels, and the label mappings:

```py
>>> from transformers import TFAutoModelForTokenClassification

>>> model = TFAutoModelForTokenClassification.from_pretrained(
...     "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
... )
```

Convert your datasets to the `tf.data.Dataset` format with [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:

```py
>>> tf_train_set = model.prepare_tf_dataset(
...     tokenized_wnut["train"],
...     shuffle=True,
...     batch_size=16,
...     collate_fn=data_collator,
... )

>>> tf_validation_set = model.prepare_tf_dataset(
...     tokenized_wnut["validation"],
...     shuffle=False,
...     batch_size=16,
...     collate_fn=data_collator,
... )
```

Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:

```py
>>> import tensorflow as tf

>>> model.compile(optimizer=optimizer)  # No loss argument!
```

The last two things to set up before you start training are to compute the seqeval scores from the predictions and to provide a way to push your model to the Hub. Both are done by using [Keras callbacks](../main_classes/keras_callbacks).

Pass your `compute_metrics` function to [`~transformers.KerasMetricCallback`]:

```py
>>> from transformers.keras_callbacks import KerasMetricCallback

>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
```

Specify where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]:

```py
>>> from transformers.keras_callbacks import PushToHubCallback

>>> push_to_hub_callback = PushToHubCallback(
...     output_dir="my_awesome_wnut_model",
...     tokenizer=tokenizer,
... )
```

Then bundle your callbacks together:

```py
>>> callbacks = [metric_callback, push_to_hub_callback]
```

Finally, you're ready to start training your model!
Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) with your training and validation datasets, the number of epochs, and your callbacks to finetune the model:

```py
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks)
```

Once training is completed, your model is automatically uploaded to the Hub so everyone can use it!

</tf>
</frameworkcontent>

<Tip>

For a more in-depth example of how to finetune a model for token classification, take a look at the corresponding
[PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)
or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).

</Tip>

## Inference

Great, now that you've finetuned a model, you can use it for inference!

Grab some text you'd like to run inference on:

```py
>>> text = "The Golden State Warriors are an American professional basketball team based in San Francisco."
```

The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for NER with your model, and pass your text to it:

```py
>>> from transformers import pipeline

>>> classifier = pipeline("ner", model="stevhliu/my_awesome_wnut_model")
>>> classifier(text)
[{'entity': 'B-location', 'score': 0.42658573, 'index': 2, 'word': 'golden', 'start': 4, 'end': 10},
 {'entity': 'I-location', 'score': 0.35856336, 'index': 3, 'word': 'state', 'start': 11, 'end': 16},
 {'entity': 'B-group', 'score': 0.3064001, 'index': 4, 'word': 'warriors', 'start': 17, 'end': 25},
 {'entity': 'B-location', 'score': 0.65523505, 'index': 13, 'word': 'san', 'start': 80, 'end': 83},
 {'entity': 'B-location', 'score': 0.4668663, 'index': 14, 'word': 'francisco', 'start': 84, 'end': 93}]
```

You can also manually replicate the results of the `pipeline` if you'd like:

<frameworkcontent>
<pt>
Tokenize the text and return PyTorch tensors:

```py
>>> import torch
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
>>> inputs = tokenizer(text, return_tensors="pt")
```

Pass your inputs to the model and return the `logits`:

```py
>>> from transformers import AutoModelForTokenClassification

>>> model = AutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
>>> with torch.no_grad():
...     logits = model(**inputs).logits
```

Get the class with the highest probability, and use the model's `id2label` mapping to convert it to a text label:

```py
>>> predictions = torch.argmax(logits, dim=2)
>>> predicted_token_class = [model.config.id2label[t.item()] for t in predictions[0]]
>>> predicted_token_class
['O', 'O', 'B-location', 'I-location', 'B-group', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-location', 'B-location', 'O', 'O']
```
</pt>
<tf>
Tokenize the text and return TensorFlow tensors:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
>>> inputs = tokenizer(text, return_tensors="tf")
```

Pass your inputs to the model and return the `logits`:

```py
>>> from transformers import TFAutoModelForTokenClassification

>>> model = TFAutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
>>> logits = model(**inputs).logits
```

Get the class with the highest probability, and use the model's `id2label` mapping to convert it to a text label:

```py
>>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
>>> predicted_token_class = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
>>> predicted_token_class
['O', 'O', 'B-location', 'I-location', 'B-group', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'O', 'B-location', 'B-location', 'O', 'O']
```
</tf>
</frameworkcontent>
transformers/docs/source/en/tasks/token_classification.md/0
{ "file_path": "transformers/docs/source/en/tasks/token_classification.md", "repo_id": "transformers", "token_count": 6507 }
29
<!--- Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Troubleshoot

Sometimes errors occur, but we are here to help! This guide covers some of the most common issues we've seen and how you can resolve them. However, this guide isn't meant to be a comprehensive collection of every 🤗 Transformers issue. For more help with troubleshooting your issue, try:

<Youtube id="S2EEG3JIt2A"/>

1. Asking for help on the [forums](https://discuss.huggingface.co/). There are specific categories you can post your question to, like [Beginners](https://discuss.huggingface.co/c/beginners/5) or [🤗 Transformers](https://discuss.huggingface.co/c/transformers/9). Make sure you write a good descriptive forum post with some reproducible code to maximize the likelihood that your problem is solved!

<Youtube id="_PAli-V4wj0"/>

2. Create an [Issue](https://github.com/huggingface/transformers/issues/new/choose) on the 🤗 Transformers repository if it is a bug related to the library. Try to include as much information describing the bug as possible to help us better figure out what's wrong and how we can fix it.

3. Check the [Migration](migration) guide if you use an older version of 🤗 Transformers since some important changes have been introduced between versions.

For more details about troubleshooting and getting help, take a look at [Chapter 8](https://huggingface.co/course/chapter8/1?fw=pt) of the Hugging Face course.

## Firewalled environments

Some GPU instances on cloud and intranet setups are firewalled to external connections, resulting in a connection error. When your script attempts to download model weights or datasets, the download will hang and then timeout with the following message:

```
ValueError: Connection error, and we cannot find the requested files in the cached path.
Please try again or make sure your Internet connection is on.
```

In this case, you should try to run 🤗 Transformers on [offline mode](installation#offline-mode) to avoid the connection error.

## CUDA out of memory

Training large models with millions of parameters can be challenging without the appropriate hardware. A common error you may encounter when the GPU runs out of memory is:

```
CUDA out of memory. Tried to allocate 256.00 MiB (GPU 0; 11.17 GiB total capacity; 9.70 GiB already allocated; 179.81 MiB free; 9.85 GiB reserved in total by PyTorch)
```

Here are some potential solutions you can try to lessen memory use:

- Reduce the [`per_device_train_batch_size`](main_classes/trainer#transformers.TrainingArguments.per_device_train_batch_size) value in [`TrainingArguments`].
- Try using [`gradient_accumulation_steps`](main_classes/trainer#transformers.TrainingArguments.gradient_accumulation_steps) in [`TrainingArguments`] to effectively increase the overall batch size, as shown in the sketch after this list.
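A minimal sketch combining the two suggestions above, assuming you want an effective batch size of 16; the exact values are placeholders to adapt to your hardware:

```py
>>> from transformers import TrainingArguments

>>> training_args = TrainingArguments(
...     output_dir="my_model",  # placeholder output directory
...     per_device_train_batch_size=4,  # a smaller per-step batch lowers peak GPU memory
...     gradient_accumulation_steps=4,  # 4 steps x batch size 4 = effective batch size of 16
... )
```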
<Tip>

Refer to the Performance [guide](performance) for more details about memory-saving techniques.

</Tip>

## Unable to load a saved TensorFlow model

TensorFlow's [model.save](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) method will save the entire model - architecture, weights, training configuration - in a single file. However, when you load the model file again, you may run into an error because 🤗 Transformers may not load all the TensorFlow-related objects in the model file. To avoid issues with saving and loading TensorFlow models, we recommend you:

- Save the model weights with a `.h5` file extension using [`model.save_weights`](https://www.tensorflow.org/tutorials/keras/save_and_load#save_the_entire_model) and then reload the model with [`~TFPreTrainedModel.from_pretrained`]:

```py
>>> from transformers import TFPreTrainedModel
>>> from tensorflow import keras

>>> model.save_weights("some_folder/tf_model.h5")
>>> model = TFPreTrainedModel.from_pretrained("some_folder")
```

- Save the model with [`~TFPreTrainedModel.save_pretrained`] and load it again with [`~TFPreTrainedModel.from_pretrained`]:

```py
>>> from transformers import TFPreTrainedModel

>>> model.save_pretrained("path_to/model")
>>> model = TFPreTrainedModel.from_pretrained("path_to/model")
```

## ImportError

Another common error you may encounter, especially if it is a newly released model, is `ImportError`:

```
ImportError: cannot import name 'ImageGPTImageProcessor' from 'transformers' (unknown location)
```

For these error types, check to make sure you have the latest version of 🤗 Transformers installed to access the most recent models:

```bash
pip install transformers --upgrade
```

## CUDA error: device-side assert triggered

Sometimes you may run into a generic CUDA error about an error in the device code.

```
RuntimeError: CUDA error: device-side assert triggered
```

You should try to run the code on a CPU first to get a more descriptive error message. Add the following environment variable to the beginning of your code to switch to a CPU:

```py
>>> import os

>>> os.environ["CUDA_VISIBLE_DEVICES"] = ""
```

Another option is to get a better traceback from the GPU. Add the following environment variable to the beginning of your code to get the traceback to point to the source of the error:

```py
>>> import os

>>> os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
```

## Incorrect output when padding tokens aren't masked

In some cases, the output `hidden_state` may be incorrect if the `input_ids` include padding tokens. To demonstrate, load a model and tokenizer. You can access a model's `pad_token_id` to see its value. The `pad_token_id` may be `None` for some models, but you can always manually set it.
```py
>>> from transformers import AutoModelForSequenceClassification
>>> import torch

>>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")
>>> model.config.pad_token_id
0
```

The following example shows the output without masking the padding tokens:

```py
>>> input_ids = torch.tensor([[7592, 2057, 2097, 2393, 9611, 2115], [7592, 0, 0, 0, 0, 0]])
>>> output = model(input_ids)
>>> print(output.logits)
tensor([[ 0.0082, -0.2307],
        [ 0.1317, -0.1683]], grad_fn=<AddmmBackward0>)
```

Here is the actual output of the second sequence:

```py
>>> input_ids = torch.tensor([[7592]])
>>> output = model(input_ids)
>>> print(output.logits)
tensor([[-0.1008, -0.4061]], grad_fn=<AddmmBackward0>)
```

Most of the time, you should provide an `attention_mask` to your model so it ignores the padding tokens and avoids this silent error. Now the output of the second sequence matches its actual output:

<Tip>

By default, the tokenizer creates an `attention_mask` for you based on your specific tokenizer's defaults.

</Tip>

```py
>>> input_ids = torch.tensor([[7592, 2057, 2097, 2393, 9611, 2115], [7592, 0, 0, 0, 0, 0]])
>>> attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 0]])
>>> output = model(input_ids, attention_mask=attention_mask)
>>> print(output.logits)
tensor([[ 0.0082, -0.2307],
        [-0.1008, -0.4061]], grad_fn=<AddmmBackward0>)
```

🤗 Transformers doesn't automatically create an `attention_mask` to mask a padding token if it is provided because:

- Some models don't have a padding token.
- For some use cases, users want a model to attend to a padding token.

## ValueError: Unrecognized configuration class XYZ for this kind of AutoModel

Generally, we recommend using the [`AutoModel`] class to load pretrained instances of models. This class can automatically infer and load the correct architecture from a given checkpoint based on the configuration. If you see this `ValueError` when loading a model from a checkpoint, this means the Auto class couldn't find a mapping from the configuration in the given checkpoint to the kind of model you are trying to load. Most commonly, this happens when a checkpoint doesn't support a given task. For instance, you'll see this error in the following example because there is no GPT2 model for question answering:

```py
>>> from transformers import AutoProcessor, AutoModelForQuestionAnswering

>>> processor = AutoProcessor.from_pretrained("openai-community/gpt2-medium")
>>> model = AutoModelForQuestionAnswering.from_pretrained("openai-community/gpt2-medium")
ValueError: Unrecognized configuration class <class 'transformers.models.gpt2.configuration_gpt2.GPT2Config'> for this kind of AutoModel: AutoModelForQuestionAnswering.
Model type should be one of AlbertConfig, BartConfig, BertConfig, BigBirdConfig, BigBirdPegasusConfig, BloomConfig, ...
```
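Conversely, loading the same checkpoint with a task GPT2 does support works without error. A minimal sketch, reusing the checkpoint above:

```py
>>> from transformers import AutoModelForCausalLM

>>> # GPT2Config has a mapping for AutoModelForCausalLM, so this loads fine
>>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
```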
transformers/docs/source/en/troubleshooting.md/0
{ "file_path": "transformers/docs/source/en/troubleshooting.md", "repo_id": "transformers", "token_count": 2569 }
30
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Exporting 🤗 Transformers models

If you need to deploy 🤗 Transformers models in production environments, we recommend exporting them to a serialized format that can be loaded and executed on specialized runtimes and hardware. In this guide, we'll show you how to export 🤗 Transformers models in two widely used formats: ONNX and TorchScript.

Once exported, a model can be optimized for inference via techniques such as quantization and pruning. If you are interested in optimizing your models to run with maximum efficiency, check out the [🤗 Optimum library](https://github.com/huggingface/optimum).

## ONNX

The [ONNX (Open Neural Network eXchange)](http://onnx.ai) project is an open standard that defines a common set of operators and a common file format to represent deep learning models in a wide variety of frameworks, including PyTorch and TensorFlow. When a model is exported to the ONNX format, these operators are used to build a computational graph (often called an _intermediate representation_) which represents the flow of data through the neural network.

By exposing a graph with standardized operators and data types, ONNX makes it easy to switch between frameworks. For example, a model trained in PyTorch can be exported to ONNX format and then imported in TensorFlow (and vice versa).

🤗 Transformers provides a package called `transformers.onnx`, which enables you to convert model checkpoints to an ONNX graph by leveraging configuration objects. These configuration objects are tailored to different model architectures and are designed to be easily extensible to other architectures.

Ready-made configurations include the following architectures:

<!--This table is automatically generated by `make fix-copies`, do not fill manually!-->

- ALBERT
- BART
- BEiT
- BERT
- BigBird
- BigBird-Pegasus
- Blenderbot
- BlenderbotSmall
- BLOOM
- CamemBERT
- CLIP
- CodeGen
- ConvBERT
- ConvNeXT
- ConvNeXTV2
- Data2VecText
- Data2VecVision
- DeBERTa
- DeBERTa-v2
- DeiT
- DETR
- DistilBERT
- ELECTRA
- FlauBERT
- GPT Neo
- GPT-J
- I-BERT
- LayoutLM
- LayoutLMv3
- LeViT
- LongT5
- M2M100
- Marian
- mBART
- MobileBERT
- MobileViT
- MT5
- OpenAI GPT-2
- Perceiver
- PLBart
- ResNet
- RoBERTa
- RoFormer
- SqueezeBERT
- T5
- ViT
- XLM
- XLM-RoBERTa
- XLM-RoBERTa-XL
- YOLOS

In the next two sections, we'll show you how to:

* Export a supported model using the `transformers.onnx` package.
* Export a custom model for an unsupported architecture.
### Exporting a model to ONNX

To export a 🤗 Transformers model to ONNX, you'll first need to install some extra dependencies:

```bash
pip install transformers[onnx]
```

The `transformers.onnx` package can then be used as a Python module:

```bash
python -m transformers.onnx --help

usage: Hugging Face Transformers ONNX exporter [-h] -m MODEL [--feature {causal-lm, ...}] [--opset OPSET] [--atol ATOL] output

positional arguments:
  output                Path indicating where to store generated ONNX model.

optional arguments:
  -h, --help            show this help message and exit
  -m MODEL, --model MODEL
                        Model ID on huggingface.co or path on disk to load model from.
  --feature {causal-lm, ...}
                        The type of features to export the model with.
  --opset OPSET         ONNX opset version to export the model with.
  --atol ATOL           Absolute difference tolerence when validating the model.
```

Exporting a checkpoint using a ready-made configuration can be done as follows:

```bash
python -m transformers.onnx --model=distilbert/distilbert-base-uncased onnx/
```

which should show the following logs:

```bash
Validating ONNX model...
        -[✓] ONNX model output names match reference model ({'last_hidden_state'})
        - Validating ONNX Model output "last_hidden_state":
                -[✓] (2, 8, 768) matches (2, 8, 768)
                -[✓] all values close (atol: 1e-05)
All good, model saved at: onnx/model.onnx
```

This exports an ONNX graph of the checkpoint defined by the `--model` argument. In this example, it is `distilbert/distilbert-base-uncased`, but it can be any checkpoint on the Hugging Face Hub or one that's stored locally.

The resulting `model.onnx` file can then be run on one of the [many accelerators](https://onnx.ai/supported-tools.html#deployModel) that support the ONNX standard. For example, we can load and run the model with [ONNX Runtime](https://onnxruntime.ai/) as follows:

```python
>>> from transformers import AutoTokenizer
>>> from onnxruntime import InferenceSession

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
>>> session = InferenceSession("onnx/model.onnx")
>>> # ONNX Runtime expects NumPy arrays as input
>>> inputs = tokenizer("Using DistilBERT with ONNX Runtime!", return_tensors="np")
>>> outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs))
```

The required output names (i.e. `["last_hidden_state"]`) can be obtained by taking a look at the ONNX configuration of each model. For example, for DistilBERT we have:

```python
>>> from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig

>>> config = DistilBertConfig()
>>> onnx_config = DistilBertOnnxConfig(config)
>>> print(list(onnx_config.outputs.keys()))
["last_hidden_state"]
```

The process is identical for TensorFlow checkpoints on the Hub. For example, we can export a pure TensorFlow checkpoint from [Keras](https://huggingface.co/keras-io) as follows:

```bash
python -m transformers.onnx --model=keras-io/transformers-qa onnx/
```

To export a model that's stored locally, you'll need to have the model's weights and tokenizer files stored in a directory.
For example, we can load and save a checkpoint as follows:

<frameworkcontent>
<pt>
```python
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> # Load tokenizer and PyTorch weights from the Hub
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
>>> # Save to disk
>>> tokenizer.save_pretrained("local-pt-checkpoint")
>>> pt_model.save_pretrained("local-pt-checkpoint")
```

Once the checkpoint is saved, we can export it to ONNX by pointing the `--model` argument of the `transformers.onnx` package to the desired directory:

```bash
python -m transformers.onnx --model=local-pt-checkpoint onnx/
```
</pt>
<tf>
```python
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

>>> # Load tokenizer and TensorFlow weights from the Hub
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
>>> # Save to disk
>>> tokenizer.save_pretrained("local-tf-checkpoint")
>>> tf_model.save_pretrained("local-tf-checkpoint")
```

Once the checkpoint is saved, we can export it to ONNX by pointing the `--model` argument of the `transformers.onnx` package to the desired directory:

```bash
python -m transformers.onnx --model=local-tf-checkpoint onnx/
```
</tf>
</frameworkcontent>

### Selecting features for different model topologies

Each ready-made configuration comes with a set of _features_ that enable you to export models for different types of topologies or tasks. As shown in the table below, each feature is associated with a different auto class:

| Feature                              | Auto Class                           |
| ------------------------------------ | ------------------------------------ |
| `causal-lm`, `causal-lm-with-past`   | `AutoModelForCausalLM`               |
| `default`, `default-with-past`       | `AutoModel`                          |
| `masked-lm`                          | `AutoModelForMaskedLM`               |
| `question-answering`                 | `AutoModelForQuestionAnswering`      |
| `seq2seq-lm`, `seq2seq-lm-with-past` | `AutoModelForSeq2SeqLM`              |
| `sequence-classification`            | `AutoModelForSequenceClassification` |
| `token-classification`               | `AutoModelForTokenClassification`    |

For each configuration, you can find the list of supported features via the `FeaturesManager`. For example, for DistilBERT we have:

```python
>>> from transformers.onnx.features import FeaturesManager

>>> distilbert_features = list(FeaturesManager.get_supported_features_for_model_type("distilbert").keys())
>>> print(distilbert_features)
["default", "masked-lm", "causal-lm", "sequence-classification", "token-classification", "question-answering"]
```

You can then pass one of these features to the `--feature` argument in the `transformers.onnx` package. For example, to export a text classification model, we can pick a fine-tuned model from the Hub and run:

```bash
python -m transformers.onnx --model=distilbert/distilbert-base-uncased-finetuned-sst-2-english \
                            --feature=sequence-classification onnx/
```

which will display the following logs:

```bash
Validating ONNX model...
        -[✓] ONNX model output names match reference model ({'logits'})
        - Validating ONNX Model output "logits":
                -[✓] (2, 2) matches (2, 2)
                -[✓] all values close (atol: 1e-05)
All good, model saved at: onnx/model.onnx
```

Notice that, in this case, the output names from the fine-tuned model are `logits` instead of the `last_hidden_state` we saw earlier with the `distilbert/distilbert-base-uncased` checkpoint. This is expected since the fine-tuned model has a sequence classification head.

<Tip>

The features that have a `with-past` suffix (e.g. `causal-lm-with-past`) correspond to model topologies with precomputed hidden states (keys and values in the attention blocks) that can be used for faster autoregressive decoding.

</Tip>

### Exporting a model for an unsupported architecture

If you wish to export a model whose architecture is not natively supported by the library, there are three main steps to follow:

1. Implement a custom ONNX configuration.
2. Export the model to ONNX.
3. Validate the outputs of the PyTorch and exported models.

In this section, we'll look at how DistilBERT's serialization was implemented to show what's involved in each step.

#### Implementing a custom ONNX configuration

Let's start with the ONNX configuration object. We provide three abstract classes that you should inherit from, depending on the type of model architecture you wish to export:

* Encoder-based models inherit from [`~onnx.config.OnnxConfig`]
* Decoder-based models inherit from [`~onnx.config.OnnxConfigWithPast`]
* Encoder-decoder models inherit from [`~onnx.config.OnnxSeq2SeqConfigWithPast`]

<Tip>

A good way to implement a custom ONNX configuration is to look at the existing implementation in the `configuration_<model_name>.py` file of a similar architecture.

</Tip>

Since DistilBERT is an encoder-based model, its configuration inherits from `OnnxConfig`:

```python
>>> from typing import Mapping, OrderedDict
>>> from transformers.onnx import OnnxConfig


>>> class DistilBertOnnxConfig(OnnxConfig):
...     @property
...     def inputs(self) -> Mapping[str, Mapping[int, str]]:
...         return OrderedDict(
...             [
...                 ("input_ids", {0: "batch", 1: "sequence"}),
...                 ("attention_mask", {0: "batch", 1: "sequence"}),
...             ]
...         )
```

Every configuration object must implement the `inputs` property and return a mapping, where each key corresponds to an expected input and each value indicates the axes of that input. For DistilBERT, we can see that two inputs are required: `input_ids` and `attention_mask`. These inputs have the same shape of `(batch_size, sequence_length)`, which is why we see the same axes used in the configuration.

<Tip>

Notice that the `inputs` property for `DistilBertOnnxConfig` returns an `OrderedDict`. This ensures that the inputs are matched with their relative position within the `PreTrainedModel.forward()` method when tracing the graph. We recommend using an `OrderedDict` for the `inputs` and `outputs` properties when implementing custom ONNX configurations.

</Tip>

Once you have implemented an ONNX configuration, you can instantiate it by providing the base model's configuration as follows:

```python
>>> from transformers import AutoConfig

>>> config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased")
>>> onnx_config = DistilBertOnnxConfig(config)
```

The resulting object has several useful properties.
For example, you can view the ONNX operator set that will be used during the export:

```python
>>> print(onnx_config.default_onnx_opset)
11
```

You can also view the outputs associated with the model as follows:

```python
>>> print(onnx_config.outputs)
OrderedDict([("last_hidden_state", {0: "batch", 1: "sequence"})])
```

Notice that the `outputs` property follows the same structure as the inputs; it returns an `OrderedDict` of named outputs and their shapes. The output structure is linked to the choice of feature that the configuration is initialized with. By default, the ONNX configuration is initialized with the `default` feature, which corresponds to exporting a model loaded with the `AutoModel` class. If you want to export a different model topology, just provide a different feature to the `task` argument when you initialize the ONNX configuration. For example, if we wanted to export DistilBERT with a sequence classification head, we could use:

```python
>>> from transformers import AutoConfig

>>> config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased")
>>> onnx_config_for_seq_clf = DistilBertOnnxConfig(config, task="sequence-classification")
>>> print(onnx_config_for_seq_clf.outputs)
OrderedDict([('logits', {0: 'batch'})])
```

<Tip>

All of the base properties and methods associated with [`~onnx.config.OnnxConfig`] and the other configuration classes can be overridden if needed. Check out [`BartOnnxConfig`] for an advanced example.

</Tip>

#### Exporting the model

Once you have implemented the ONNX configuration, the next step is to export the model. Here we can use the `export()` function provided by the `transformers.onnx` package. This function expects the ONNX configuration, along with the base model and tokenizer, and the path to save the exported file:

```python
>>> from pathlib import Path
>>> from transformers.onnx import export
>>> from transformers import AutoTokenizer, AutoModel

>>> onnx_path = Path("model.onnx")
>>> model_ckpt = "distilbert/distilbert-base-uncased"
>>> base_model = AutoModel.from_pretrained(model_ckpt)
>>> tokenizer = AutoTokenizer.from_pretrained(model_ckpt)

>>> onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path)
```

The `onnx_inputs` and `onnx_outputs` objects returned by the `export()` function are lists of the keys defined in the `inputs` and `outputs` properties of the configuration. Once the model is exported, you can test that it is well formed as follows:

```python
>>> import onnx

>>> onnx_model = onnx.load("model.onnx")
>>> onnx.checker.check_model(onnx_model)
```

<Tip>

If your model is larger than 2GB, you will see that many additional files are created during the export. This is _expected_ because ONNX uses [Protocol Buffers](https://developers.google.com/protocol-buffers/) to store the model, and these have a 2GB size limit. See the [ONNX documentation](https://github.com/onnx/onnx/blob/master/docs/ExternalData.md) for instructions on how to load models with external data.

</Tip>

#### Validating the model outputs

The final step is to validate that the outputs of the base and exported models agree within some absolute tolerance.
Here we can use the `validate_model_outputs()` function provided by the `transformers.onnx` package as follows:

```python
>>> from transformers.onnx import validate_model_outputs

>>> validate_model_outputs(
...     onnx_config, tokenizer, base_model, onnx_path, onnx_outputs, onnx_config.atol_for_validation
... )
```

This function uses the `OnnxConfig.generate_dummy_inputs()` method to generate inputs for the base and exported models, and the absolute tolerance can be defined in the configuration. We generally find numerical agreement in the 1e-6 to 1e-4 range, although anything smaller than 1e-3 is likely to be fine.

### Contributing a new configuration to 🤗 Transformers

We are looking to expand the set of ready-made configurations and welcome contributions from the community! If you would like to contribute your addition to the library, you will need to:

* Implement the ONNX configuration in the corresponding `configuration_<model_name>.py` file
* Include the model architecture and corresponding features in [`~onnx.features.FeatureManager`]
* Add your model architecture to the tests in `test_onnx_v2.py`

Check out how the [IBERT configuration](https://github.com/huggingface/transformers/pull/14868/files) was contributed to get an idea of what's involved.

## TorchScript

<Tip>

This is the very beginning of our experiments with TorchScript and we are still exploring its capabilities with variable-input-size models. It is a focus of interest for us, and we will deepen our analysis in upcoming releases, with more code examples, a more flexible implementation, and benchmarks comparing Python-based code with compiled TorchScript.

</Tip>

According to the PyTorch documentation: "TorchScript is a way to create serializable and optimizable models from PyTorch code". PyTorch's two modules, [JIT and TRACE](https://pytorch.org/docs/stable/jit.html), allow a developer to export their model to be reused in other programs, such as efficiency-oriented C++ programs.

We have provided an interface that allows you to export 🤗 Transformers models to TorchScript so that they can be reused in an environment other than a PyTorch-based Python program. Here we explain how to export and use our models with TorchScript.

Exporting a model requires two things:

- a forward pass with dummy inputs.
- model instantiation with the `torchscript` flag.

These necessities imply several things developers should be careful about. They are detailed below.

### TorchScript flag and tied weights

This flag is necessary because most of the language models in this repository have tied weights between their `Embedding` layer and their `Decoding` layer. TorchScript does not allow the export of models with tied weights, so it is necessary to untie and clone the weights beforehand.

This implies that models instantiated with the `torchscript` flag have their `Embedding` layer and `Decoding` layer separated, which means that they should not be trained afterwards. Training would desynchronize the two layers, leading to unexpected results.

This is not the case for models that do not have a language model head, as those do not have tied weights. These models can be safely exported without the `torchscript` flag.
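Not part of the export workflow itself, but a quick way to see weight tying in action is to compare the storage of a model's input and output embeddings. A minimal sketch, assuming a BERT checkpoint with a (tied by default) masked language modeling head:

```python
from transformers import AutoModelForMaskedLM

model = AutoModelForMaskedLM.from_pretrained("google-bert/bert-base-uncased")

# With tied weights, both embedding matrices point at the same underlying storage
input_embeddings = model.get_input_embeddings()
output_embeddings = model.get_output_embeddings()
print(input_embeddings.weight.data_ptr() == output_embeddings.weight.data_ptr())
```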
### Dummy inputs and standard lengths

The dummy inputs are used for a forward pass of the model. While the input values are propagated through the layers, PyTorch keeps track of the different operations executed on each tensor. These recorded operations are then used to create the "trace" of the model.

The trace is created relative to the dimensions of the inputs. It is therefore constrained by the dimensions of the dummy input and will not work for any other sequence length or batch size. When trying a different size, an error such as:

`The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2`

will appear. It is therefore recommended to trace the model with a dummy input size at least as large as the largest input that will be fed to the model during inference. Padding can be performed to fill in the missing values. However, since the model will have been traced with a large input size, the dimensions of the different matrices will also be large, resulting in more computation.

Be careful about the total number of operations performed on each input, and follow performance closely when exporting models with variable sequence lengths.

### Using TorchScript in Python

Below is an example showing how to save and load models, and how to use the trace for inference.

#### Saving a model

This snippet shows how to use TorchScript to export a `BertModel`. Here, the `BertModel` is instantiated according to the `BertConfig` class and then saved to disk under the filename `traced_bert.pt`

```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch

enc = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Tokenizing input text
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = enc.tokenize(text)

# Masking one of the input tokens
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
indexed_tokens = enc.convert_tokens_to_ids(tokenized_text)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]

# Creating a dummy input
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
dummy_input = [tokens_tensor, segments_tensors]

# Initializing the model with the torchscript flag
# Flag set to True even though it is not necessary as this model does not have an LM Head.
config = BertConfig(
    vocab_size_or_config_json_file=32000,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    torchscript=True,
)

# Instantiating the model
model = BertModel(config)

# The model needs to be in evaluation mode
model.eval()

# If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag
model = BertModel.from_pretrained("google-bert/bert-base-uncased", torchscript=True)

# Creating the trace
traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(traced_model, "traced_bert.pt")
```

#### Loading a model

This snippet shows how to load the `BertModel` that was previously saved to disk under the name `traced_bert.pt`. We are reusing the previously initialized `dummy_input`.
```python
loaded_model = torch.jit.load("traced_bert.pt")
loaded_model.eval()

all_encoder_layers, pooled_output = loaded_model(*dummy_input)
```

#### Using a traced model for inference

Using the traced model for inference is as simple as using its `__call__` method:

```python
traced_model(tokens_tensor, segments_tensors)
```

### Deploying Hugging Face TorchScript models to AWS with the Neuron SDK

AWS introduced the [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/) instance family for low-cost, high-performance machine learning inference in the cloud. The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware accelerator that specializes in deep learning inference workloads. [AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) is the SDK for Inferentia that supports tracing and optimizing transformers models for deployment on Inf1. The Neuron SDK provides:

1. An easy-to-use API with a one-line code change to trace and optimize a TorchScript model for inference in the cloud.
2. Out-of-the-box performance optimizations for [improved cost-performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/)
3. Support for Hugging Face Transformers models built with either [PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html) or [TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html).

#### Implications

Transformers models based on the [BERT (Bidirectional Encoder Representations from Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert) architecture, or its variants such as [distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) and [roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta), will run best on Inf1 for non-generative tasks such as extractive question answering, sequence classification, and token classification. Alternatively, text generation tasks can be adapted to run on Inf1, according to this [AWS Neuron MarianMT tutorial](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html). More information about models that are ready to use on Inferentia can be found in the [Model Architecture Fit section of the Neuron documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia).

#### Dependencies

Using AWS Neuron to convert models requires the following dependencies and environment:

* A [Neuron SDK environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide), which comes preconfigured on the [AWS Deep Learning AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html).

#### Converting a model for AWS Neuron

Using the same script as in [Using TorchScript in Python](https://huggingface.co/docs/transformers/main/es/serialization#using-torchscript-in-python) to trace a `BertModel`, you can import the `torch.neuron` framework extension to access the components of the Neuron SDK through a Python API.
```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch
import torch.neuron
```

And modify the tracing line of code from:

```python
torch.jit.trace(model, [tokens_tensor, segments_tensors])
```

to the following:

```python
torch.neuron.trace(model, [tokens_tensor, segments_tensors])
```

This change enables the Neuron SDK to trace the model and optimize it to run on Inf1 instances.

To learn more about AWS Neuron SDK features, tools, example tutorials, and the latest updates, please see the [AWS Neuron SDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html).
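For completeness, the object returned by `torch.neuron.trace` is a regular TorchScript module, so it can be saved and reloaded like the earlier `traced_bert.pt` example. A minimal sketch, assuming the tensors defined above; the file name is arbitrary:

```python
# Sketch: persist the Neuron-compiled model and reload it for inference
model_neuron = torch.neuron.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(model_neuron, "traced_bert_neuron.pt")

loaded_neuron_model = torch.jit.load("traced_bert_neuron.pt")
outputs = loaded_neuron_model(tokens_tensor, segments_tensors)
```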
transformers/docs/source/es/serialization.md/0
{ "file_path": "transformers/docs/source/es/serialization.md", "repo_id": "transformers", "token_count": 10517 }
31
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Load pretrained instances with an AutoClass

With so many different Transformer architectures, it can be challenging to create one for your set of weights (also called a "checkpoint"). As part of 🤗 Transformers' goal of being an easy, simple, and flexible library, an `AutoClass` automatically infers and loads the correct architecture from a given checkpoint. The `from_pretrained()` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means that if your code works for one checkpoint, it will work with another one, as long as it was trained for a similar task, even if the architecture is different.

<Tip>

Remember, architecture refers to the skeleton of the model, and the checkpoint contains the weights for a given architecture. For example, [BERT](https://huggingface.co/google-bert/bert-base-uncased) is an architecture, while `google-bert/bert-base-uncased` is a checkpoint. Model is a general term that can mean either architecture or checkpoint.

</Tip>

In this tutorial, you will learn how to:

* Load a pretrained tokenizer.
* Load a pretrained image processor.
* Load a pretrained feature extractor.
* Load a pretrained processor.
* Load a pretrained model.

## AutoTokenizer

Nearly every NLP task begins with a tokenizer. A tokenizer converts your raw text into a format that can be processed by the model.

Load a tokenizer with [`AutoTokenizer.from_pretrained`]:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
```

Then tokenize your input as shown below:

```py
>>> sequence = "In a hole in the ground there lived a hobbit."
>>> print(tokenizer(sequence))
{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

## AutoImageProcessor

For vision tasks, an image processor processes the image into the correct input format. Load an image processor with [`AutoImageProcessor.from_pretrained`]:
```py
>>> from transformers import AutoImageProcessor

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
```

## AutoBackbone

<div style="text-align: center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stages.png">
    <figcaption class="mt-2 text-center text-sm text-gray-500">A Swin backbone with multiple stages for outputting a feature map.</figcaption>
</div>

[`AutoBackbone`] lets you use pretrained models as backbones to get feature maps from different stages of the backbone. You should specify one of the following parameters in [`~PretrainedConfig.from_pretrained`]:

* `out_indices` is the index of the layer you'd like to get the feature map from
* `out_features` is the name of the layer you'd like to get the feature map from

These parameters can be used interchangeably, but if you use both, make sure they're aligned with each other! If you don't pass any of these parameters, the backbone returns the feature map from the last layer.

<div style="text-align: center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png">
    <figcaption class="mt-2 text-center text-sm text-gray-500">A feature map from the first stage of the backbone. The patch partition refers to the model stem.</figcaption>
</div>

For example, in the diagram above, to return the feature map from the first stage of the Swin backbone, you can set `out_indices=(1,)`:

```py
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
>>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))

>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
```

Now you can access the `feature_maps` object from the first stage of the backbone:

```py
>>> list(feature_maps[0].shape)
[1, 96, 56, 56]
```

## AutoFeatureExtractor

For audio tasks, a feature extractor processes the audio signal into the correct input format. Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:

```py
>>> from transformers import AutoFeatureExtractor

>>> feature_extractor = AutoFeatureExtractor.from_pretrained(
...     "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
... )
```

## AutoProcessor

Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires an image processor to handle images and a tokenizer to handle text; a processor combines both.
Load a processor with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
```

## AutoModel

<frameworkcontent>
<pt>
Finally, the `AutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`AutoModelForSequenceClassification.from_pretrained`]:

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Easily reuse the same checkpoint to load an architecture for a different task:

```py
>>> from transformers import AutoModelForTokenClassification

>>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

<Tip warning={true}>

For PyTorch models, the `from_pretrained()` method uses `torch.load()`, which internally uses `pickle` and is known to be insecure. In general, never load a model that could have come from an untrusted source, or that could have been tampered with. This security risk is partially mitigated for public models hosted on the Hugging Face Hub, which are [scanned for malware](https://huggingface.co/docs/hub/security-malware) at each commit. See the [Hub documentation](https://huggingface.co/docs/hub/security) for best practices like [signed commit verification](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) with GPG.

TensorFlow and Flax checkpoints are not affected, and can be loaded within PyTorch architectures using the `from_tf` and `from_flax` arguments of the `from_pretrained` method to circumvent this issue.

</Tip>

Generally, we recommend using the `AutoTokenizer` and `AutoModelFor` classes to load pretrained instances of tokenizers and models respectively. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), you'll learn how to use a tokenizer, image processor, feature extractor, and processor to preprocess a dataset for fine-tuning.
</pt>
<tf>
Finally, the `TFAutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`TFAutoModelForSequenceClassification.from_pretrained`]:

```py
>>> from transformers import TFAutoModelForSequenceClassification

>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Easily reuse the same checkpoint to load an architecture for a different task:

```py
>>> from transformers import TFAutoModelForTokenClassification

>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

Generally, we recommend using the `AutoTokenizer` and `TFAutoModelFor` classes to load pretrained instances of tokenizers and models respectively. This will ensure you load the correct architecture every time.
In the next [tutorial](preprocessing), you'll learn how to use a tokenizer, image processor, feature extractor, and processor to preprocess a dataset for fine-tuning.
</tf>
</frameworkcontent>
transformers/docs/source/fr/autoclass_tutorial.md/0
{ "file_path": "transformers/docs/source/fr/autoclass_tutorial.md", "repo_id": "transformers", "token_count": 3454 }
32
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# How to add a model to 🤗 Transformers?

Adding a new model is often difficult and requires an in-depth knowledge of the 🤗 Transformers library, and ideally also of the model's original repository. At Hugging Face, we are trying to empower the community more and more to add models independently. Thus, for some new models that the community wants to see added to 🤗 Transformers, we have created a specific *call-for-model-addition* that explains step by step how to add the requested model. With this *call-for-model-addition*, we want to teach motivated and experienced contributors from the community how to implement a model in 🤗 Transformers.

If this is something you might be interested in, feel free to check out the current "calls-for-model-addition" [here](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model/open_model_proposals/README.md) and to contact us. If the model is selected, you will then work together with a member of the Hugging Face team to integrate the model into 🤗 Transformers. By doing so, you will gain a complete understanding, both theoretical and practical, of the proposed model. Moreover, you will have made a major open-source contribution to 🤗 Transformers. Along the way, you will have the opportunity to:

- gain insight into open-source best practices
- understand the design principles of one of the most popular NLP libraries
- learn how to efficiently test complex NLP models
- learn how to integrate Python utilities like `black`, `ruff`, and `make fix-copies` into a library to always ensure clean and readable code

We are also happy if you want to add a model that cannot be found in the "calls-for-model-addition" folder. The following sections explain in detail how to add a new model. It can also be very helpful to check out already added models [here](https://github.com/huggingface/transformers/pulls?q=is%3Apr+label%3A%22PR+for+Model+Addition%22+is%3Aclosed) to see whether they resemble the model you would like to add.

To get started, let's take a general overview of the Transformers library.

## General overview of 🤗 Transformers

First, let's look at 🤗 Transformers in general. 🤗 Transformers is a very opinionated library, so it is possible that you disagree with some of the library's philosophies or design choices. From our experience, however, we have found that the fundamental design choices of the library are crucial to efficiently using 🤗 Transformers at scale while keeping maintenance costs at an acceptable level.
A good first starting point to better understand the library is to read the [documentation of our philosophy](philosophy). From there, there are a few working choices that we try to apply to all models:

- Composition is generally favored over over-abstraction
- Duplicating code is not always bad, especially if it significantly improves the readability and accessibility of a model
- All files created for a new model should be as self-contained as possible. This means that when someone reads the code of a specific model, they should ideally only have to look at the corresponding `modeling_....py` file, without following multiple dependencies.

Most importantly, we consider the library not just a means of providing a product, *e.g.* the ability to use BERT for inference, but also as the very product that we want to keep improving. Hence, when adding a model, you are not only the person who will use the model, but you also represent everybody who will read, try to understand, and modify your model.

With these principles in mind, let's dive into the general design of the library.

### Overview of models

To successfully add a model, it is important to understand the interaction between your model and its configuration, [`PreTrainedModel`], and [`PretrainedConfig`]. For exemplary purposes, we will call the model to be added to 🤗 Transformers `BrandNewBert`. Let's take a look:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/>

As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits from `BrandNewBertPreTrainedModel`, which in turn inherits from [`PreTrainedModel`], and that's it, simple right? As a general rule, we want to make sure that a new model only depends on [`PreTrainedModel`]. The important functionalities that are automatically provided to every new model are [`~PreTrainedModel.from_pretrained`] and [`~PreTrainedModel.save_pretrained`], which are used for serialization and deserialization. All of the other important functionalities, such as `BrandNewBertModel.forward`, should be completely defined in the new `modeling_brand_new_bert.py` script.

Next, we want to make sure that a model with a specific head layer, such as `BrandNewBertForMaskedLM`, does not inherit from `BrandNewBertModel`, but rather uses `BrandNewBertModel` as a component that can be called in its forward pass, to keep the level of abstraction low.

Every new model requires a configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute in [`PreTrainedModel`], and can thus be accessed via the `config` attribute for all classes inheriting from `BrandNewBertPreTrainedModel`:

```python
model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")
model.config  # the model has access to its config
```

Similar to the model, the configuration inherits basic serialization and deserialization functionalities from [`PretrainedConfig`]. Note that the configuration and the model are always serialized into two different formats: the model to a *pytorch_model.bin* file and the configuration to a *config.json* file.
Calling [`~PreTrainedModel.save_pretrained`] will automatically call [`~PretrainedConfig.save_pretrained`], so that both the model and the configuration are saved.

### Code style

When coding your new model, keep in mind that Transformers has its own underlying structure as a library, so there are a few facts to consider about how to write code :-)

1. The forward pass of your model should be written completely in the model file, while being fully independent of other models in the library. If you want to reuse a block of code from another model, copy and paste the code with a `# Copied from` comment on top (see [here](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) for a good example).
2. The code should be fully understandable, even by people who do not speak English. This means variables should have descriptive names and abbreviations should be avoided. For example, `activation` is much better than `act`. One-letter variable names are strongly discouraged, unless it's an index in a for loop.
3. Generally, it's better to have explicit, longer code than short, magical code.
4. Avoid subclassing `nn.Sequential` in PyTorch; subclass `nn.Module` and write the forward pass instead, so that anyone can debug your code by adding print statements or breakpoints.
5. Your function signatures should be type-annotated. For the rest, good variable names are preferable to annotations for improving the comprehension and readability of the code.

### Overview of tokenizers

This section will be added soon :-(

## Step-by-step recipe for adding a model to 🤗 Transformers

There are different ways to add a model to Hugging Face. Here is a list of community blog posts on how to add a model:

1. [Porting GPT2](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) written by [Thomas](https://huggingface.co/thomwolf)
2. [Porting WMT19 MT](https://huggingface.co/blog/porting-fsmt) written by [Stas](https://huggingface.co/stas)

From experience, we can tell you that the following considerations are the most important to keep in mind when adding a model:

- Don't reinvent the wheel! Most of the code you will add for a new 🤗 Transformers model already exists somewhere in 🤗 Transformers. Take some time to find similar code in existing models and tokenizers that you can copy from. Remember that [grep](https://www.gnu.org/software/grep/) and [rg](https://github.com/BurntSushi/ripgrep) are your friends. Also, note that it might very well happen that your model's tokenizer is based on one model's implementation, and your model's code itself on yet another one. *For example*, FSMT's model is based on BART, while FSMT's tokenizer is based on XLM.
- It's more of an engineering challenge than a scientific one. Spend more time creating an efficient debugging environment rather than trying to understand all the theoretical aspects of the model's paper.
- Ask for help when you're stuck! Models are the core component of 🤗 Transformers, so we at Hugging Face are more than happy to help you at every step of adding your model. Don't hesitate to ask if you notice you are not making progress.
Below, we give a general recipe to help port a model into 🤗 Transformers. The following list is a summary of everything that has to be done to add a model, and can be used as a To-Do list:

- 1. ☐ (Optional) Understood the model's theoretical aspects
- 2. ☐ Prepared the transformers dev environment
- 3. ☐ Set up a debugging environment of the original repository
- 4. ☐ Created a script that successfully runs the forward pass using the original repository and checkpoint
- 5. ☐ Successfully added the model skeleton to Transformers
- 6. ☐ Converted the original checkpoint to a Transformers checkpoint
- 7. ☐ Successfully ran the forward pass in Transformers so that it gives an output identical to the original checkpoint
- 8. ☐ Finished the model tests in Transformers
- 9. ☐ Successfully added the tokenizer in Transformers
- 10. ☐ Ran and verified the end-to-end integration tests
- 11. ☐ Finished the docs
- 12. ☐ Uploaded the model weights to the Hub
- 13. ☐ Submitted a pull request
- 14. ☐ (Optional) Added a demo notebook

To begin with, we usually recommend starting with `BrandNewBert` from the theory, so as to gain a good understanding of the general theory. However, if you prefer to learn the theoretical aspects of the model while *working* on it, it's fine to dive directly into `BrandNewBert`'s code base. This option might suit you better if your engineering skills are better than your theoretical ones, if the `BrandNewBert` paper gives you trouble, or if you simply enjoy programming more than reading scientific papers.

### 1. (Optional) Theoretical aspects of BrandNewBert

So, take your time and read the paper on *BrandNewBert*. Some sections of the paper are surely very complex, but don't worry! The goal is not to gain an immense understanding of the underlying theory, but to extract the information necessary to successfully re-implement the model in 🤗 Transformers. So, don't go crazy on the theoretical aspects, but rather focus on the practical ones, namely:

- What type of model is *brand_new_bert*? Just a BERT-like encoder? Or a GPT2-like decoder? Or a BART-like encoder-decoder? Take a look at the [model_summary](model_summary) if you're not familiar with the differences between those models
- What are the applications of *brand_new_bert*? Text classification? Text generation? Or seq2seq-like tasks?
- What are the new additions to the model that make it different from BERT/GPT-2/BART?
- Which of the existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar to *brand_new_bert*?
- What type of tokenizer is used in this case? A SentencePiece tokenizer? Or a WordPiece tokenizer? Is the tokenizer the same as for BERT or BART?

Once you feel like you have gotten a good overview of the model's architecture, feel free to write to the Hugging Face team with any questions you may have. This might include questions about the model's architecture, its attention layer, etc. We will be more than happy to help you :)

### 2. Prepare your environment

1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub account.

2.
2. Clone your `transformers` fork to your local disk, and add the base repository as a remote:

```bash
git clone https://github.com/[your Github handle]/transformers.git
cd transformers
git remote add upstream https://github.com/huggingface/transformers.git
```

3. Set up a development environment, for instance by running the following commands:

```bash
python -m venv .env
source .env/bin/activate
pip install -e ".[dev]"
```

then return to the parent directory:

```bash
cd ..
```

4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, simply follow the instructions at https://pytorch.org/get-started/locally/.

**Note:** You don't need to have CUDA installed. Making the new model work on a CPU is sufficient.

5. To port *brand_new_bert*, you will also need access to its original repository:

```bash
git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
cd brand_new_bert
pip install -e .
```

Ok, you now have a development environment to port *brand_new_bert* to 🤗 Transformers.

### 3.-4. Run a pretrained checkpoint using the original repository

To begin, you will work on the original *brand_new_bert* repository. As often happens, the original implementation is very "researchy": documentation may be lacking, something may be missing, and the code can be hard to understand. However, this should be exactly your motivation to reimplement *brand_new_bert*. At Hugging Face, one of our main goals is to *make people stand on the shoulders of giants*, which translates here into taking a working model and rewriting it to make it as **accessible, user-friendly, and readable** as possible. This is the number one motivation for re-implementing models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**.

Getting the original pretrained model to run from the official repository is often the **most difficult** step. From our experience, it is very important to spend some time getting familiar with the original code base. As an exercise, try to figure out the following:

- Where are the pretrained weights?
- How do you load the pretrained weights into the corresponding model?
- How do you run the tokenizer independently from the model?
- Trace one forward pass, so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions.
- Locate the important components of the model: Where is the model class? Are there model sub-classes, *e.g.* EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, *e.g.* *self-attention*, *cross-attention*...?
- How can you debug the model in the original environment of the repo? Do you have to add *print* statements, can you use an interactive debugger like *ipdb*, or is an efficient IDE for debugging, like PyCharm, a good fit?

It is very important that, before you start porting the new model, you can debug the original code base **efficiently**! Also, remember that the whole library is open-source, so don't be afraid to open issues or pull requests in the original repository. The maintainers of that repository will most likely be very happy to have someone looking into and playing with their code!
At this point, it is up to you to decide which debugging environment to use. We recommend avoiding GPU setups, which can be quite expensive; working on a CPU can be a great starting point both to investigate the original repository and to begin writing the code for 🤗 Transformers. Only at the very end, once the model has been successfully ported to 🤗 Transformers, should you verify that it also works as expected on GPU.

In general, there are two possible debugging environments for testing the original model:

- [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb)
- Local Python scripts

The advantage of Jupyter notebooks is that they can be run cell by cell, which can be useful to decompose all the logical components and to have faster debugging cycles, since intermediate results can be stored. Notebooks are also often easy to share with other contributors, which can be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupyter notebooks, we recommend working with them. If you are not used to working with notebooks, however, this can be a disadvantage: you may waste a lot of time setting things up and porting everything to the new environment, and you may not be able to use your usual debugging tools, like `ipdb`.

For every code base, a good first step is always to load a **small** pretrained checkpoint and try to reproduce a single forward pass using a dummy vector of integer input IDs. In pseudocode, such a script could look like this:

```python
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = [0, 4, 5, 2, 3, 7, 9]  # vector of input ids
original_output = model.predict(input_ids)
```

As for the debugging strategy, you can choose between:

- Decomposing the original model into many small testable components, and running a forward pass on each of them for verification
- Decomposing the original model only into the original *tokenizer* and the original *model*, running a forward pass on those, and using intermediate print statements or breakpoints for verification

Again, you are free to choose whichever strategy works best for you. Often one is more advantageous than the other, but it all depends on the original code base. If the code base allows you to decompose the model into smaller sub-components, *e.g.* if it can easily be run in eager mode, it is usually worth the effort to do so.
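As a minimal sketch of the first strategy - all names here (`BrandNewBertAttention`, its constructor arguments, the module path) are hypothetical placeholders, since the real ones depend entirely on the original code base - testing one sub-component in isolation could look like this:

```python
import torch

# Hypothetical import from the original research repository
from brand_new_bert.modeling import BrandNewBertAttention

torch.manual_seed(0)  # make the dummy input reproducible

# Instantiate just the attention sub-module instead of the whole model
attention = BrandNewBertAttention(hidden_size=1024, num_heads=16)
attention.eval()  # disable dropout so the output is deterministic

# A dummy batch: (batch_size, sequence_length, hidden_size)
dummy_hidden_states = torch.randn(1, 7, 1024)

with torch.no_grad():
    attention_output = attention(dummy_hidden_states)

# Store the result so it can later be compared against the 🤗 Transformers port
print(attention_output.shape)
torch.save(attention_output, "original_attention_output.pt")
```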
Remember that there are important advantages to taking the more demanding road from the start:

- at a later stage, when comparing the original model to the Hugging Face implementation, you can automatically verify, for each component individually, that there is a 1:1 correspondence
- you get to decompose one big problem into smaller steps, which makes it easier to structure your work
- separating the model into logical components will give you a great overview of the model's design, and thus a better understanding of the model itself
- at later stages, those per-component tests help you ensure that no regressions occur as you keep changing your code

[Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration checks for the ELECTRA model give a nice example of how this can be done.

However, if the original code base is very complex, or only allows intermediate components to be run in a compiled mode, it might be too time-consuming or even impossible to separate the model into smaller testable sub-components. A good example is the [MeshTensorFlow library for T5](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow), which is very complex and does not offer a simple way of decomposing the model into its sub-components. For such libraries, you will have to rely on print statements.

No matter which strategy you choose, the recommended procedure is the same: start debugging from the first layers and finish with the last layers. It is recommended that you retrieve the outputs of the layers, either with print statements or via sub-components, in the following order:

1. Retrieve the input IDs passed to the model
2. Retrieve the word embeddings
3. Retrieve the input of the first Transformer layer
4. Retrieve the output of the first Transformer layer
5. Retrieve the output of the following `n - 1` Transformer layers
6. Retrieve the output of the whole BrandNewBert Model

The input IDs should be an array of integers, *e.g.* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`

The outputs of the following layers usually consist of multi-dimensional float arrays, like this:

```
[[
 [-0.1465, -0.6501,  0.1993,  ...,  0.1451,  0.3430,  0.6024],
 [-0.4417, -0.5920,  0.3450,  ..., -0.3062,  0.6182,  0.7132],
 [-0.5009, -0.7122,  0.4548,  ..., -0.3662,  0.6091,  0.7648],
 ...,
 [-0.5613, -0.6332,  0.4324,  ..., -0.3792,  0.7372,  0.9288],
 [-0.5416, -0.6345,  0.4180,  ..., -0.3564,  0.6992,  0.9191],
 [-0.5334, -0.6403,  0.4271,  ..., -0.3339,  0.6533,  0.8694]]],
```

We expect every model added to 🤗 Transformers to pass a couple of integration tests, meaning that the original model and its reimplemented 🤗 Transformers version must give the same output up to a precision of 0.001! Since it is normal for the exact same model, written in different libraries, to give slightly different outputs, the accepted tolerance is 1e-3 (0.001). It is not enough for the two models to give *nearly* the same output: they must be almost identical. Therefore, you will certainly compare the intermediate outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of *brand_new_bert*, for which an **efficient** debugging environment is absolutely key. Here is some advice to make your debugging environment as efficient as possible:

- Find the best way to debug intermediate results. For example, is the original repository written in PyTorch?
If so, you will most likely have to take the time to write longer scripts that decompose the original model into smaller sub-components so that intermediate values can be retrieved. Is the original repository written in Tensorflow 1? Then you may have to rely on TensorFlow's print operations, like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print), to output intermediate values. Is it written in Jax? Then make sure the model is **not jitted** when running the forward pass, *e.g.* check out [this link](https://github.com/google/jax/issues/196).
- Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debugging cycle. It is not efficient if your pretrained model is so big that the forward pass takes more than 10 seconds. If only very large checkpoints are available, it can make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights, in order to compare the 🤗 Transformers version against your model.
- Make sure you are using the easiest way to call the forward pass in the original repository. Ideally, you want to find the function in the original repository that calls **only** a single forward pass; it is often called `predict`, `evaluate`, `forward` or `__call__`. Make sure you are not debugging a function that calls `forward` multiple times, *e.g.* to generate text, like `autoregressive_sample` or `generate`.
- Try to separate the tokenization from the model's forward pass. If the original repository shows examples where you have to input a string, try to find out where in the forward call the string input is changed into input IDs, and start your debugging from this point. This gives you a great starting point for writing a small personal script that feeds input IDs to the model, rather than input strings.
- Make sure the model in your debugging setup is **not** in training mode, which would often cause the model to yield random outputs because of the multiple dropout layers. Make sure the forward pass in your debugging environment is **deterministic**, so that the dropout layers are not used. Alternatively, you can use *transformers.utils.set_seed* if the old and new implementations are in the same framework.

The following section gives you more specific details/tips on how you can do this for *brand_new_bert*.

### 5.-14. Port BrandNewBert to 🤗 Transformers

Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers fork:

```bash
cd transformers
```

In the special case that you are adding a model whose architecture exactly matches that of an existing model, you only have to add a conversion script, as described [here](#write-a-conversion-script). In this case, you can just re-use the whole model architecture of the already existing model.

Otherwise, let's start generating a new model. We recommend using the following script to add a model starting from an existing one:

```bash
transformers-cli add-new-model-like
```

You will be prompted with a questionnaire to fill in the basic information of your model.
**Open a Pull Request on the main huggingface/transformers repo**

Before starting to adapt the automatically generated code, open a "Work in progress (WIP)" pull request, *e.g.* "[WIP] Add *brand_new_bert*", so that the Hugging Face team can work side-by-side with you on integrating the model into 🤗 Transformers.

These are the general steps to follow:

1. Create a branch with a descriptive name from your main branch

```bash
git checkout -b add_brand_new_bert
```

2. Commit the automatically generated code:

```bash
git add .
git commit
```

3. Fetch and rebase on the current main

```bash
git fetch upstream
git rebase upstream/main
```

4. Push the changes to your account:

```bash
git push -u origin a-descriptive-name-for-my-changes
```

5. Once you are satisfied with the changes, go to the webpage of your fork on GitHub and click on "Pull request". Make sure to add some members of the Hugging Face team as reviewers, in the panel on the right of the PR page, so that the Hugging Face team gets notified of future changes as well.

6. Change the PR into a draft by clicking on "Convert to draft" on the right of the PR page.

From then on, remember to commit every bit of progress so that it shows up in the PR. Additionally, remember to keep your work up to date with the current main:

```bash
git fetch upstream
git merge upstream/main
```

In general, all questions you have regarding the model or your implementation should be asked in your PR and discussed/solved there. This way, the Hugging Face team will always be notified when you commit new code or when you have a question. It is very useful to point the Hugging Face team to the code you are referring to in your question, so that the team can easily understand the problem. To do so, go to the "Files changed" tab, where you can see all of your changes, go to the line you want to ask a question about, and click on the "+" symbol to add a comment. Whenever a question or problem has been solved, click on the "Resolve" button. In the same way, Hugging Face will open questions or comments when reviewing your code. We recommend asking most of your questions on the PR page itself. For very general questions that are not very useful to the public, feel free to ping the Hugging Face team directly via Slack or email.

**5. Adapt the generated code for brand_new_bert**

At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`.

Now you can finally start coding :). The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will have the same architecture as BERT if it's an encoder-only model, or as BART if it's an encoder-decoder model. At this point, remind yourself of what you learned at the beginning about the theoretical aspects of the model: *How is the model I'm implementing different from BERT or BART?*. Implementing those changes often means changing the *self-attention* layer, the order of the normalization layers, and so on... Once again, it is very useful to look at similar architectures of already existing models in Transformers to get a better idea of how your model should be implemented.
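As a purely illustrative sketch of what such an architectural tweak might look like - the class name is hypothetical, and whether *brand_new_bert* actually uses pre-layer normalization is an assumption made only for this example - moving the layer norm in front of the attention block could be expressed as:

```python
import torch
from torch import nn


class BrandNewBertLayer(nn.Module):
    """Hypothetical sketch: a BERT-like layer that applies LayerNorm *before*
    self-attention (pre-LN) instead of after it (post-LN) - one of the small
    architectural differences a new model often introduces."""

    def __init__(self, hidden_size: int = 32, num_heads: int = 4):
        super().__init__()
        self.layer_norm = nn.LayerNorm(hidden_size)
        self.self_attention = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Pre-LN: normalize first, then attend, then add the residual connection
        normalized = self.layer_norm(hidden_states)
        attention_output, _ = self.self_attention(normalized, normalized, normalized)
        return hidden_states + attention_output


# Quick smoke test with a dummy batch of shape (batch, sequence, hidden)
layer = BrandNewBertLayer()
print(layer(torch.randn(1, 7, 32)).shape)
```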
**Note** that at this point you don't have to have code that is fully correct or clean. Rather, it is advised to start with an *unclean* first version, copy-pasting the original code into `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel that all the necessary code has been added. From our experience, it is much more efficient to quickly add a first draft of the required code and then improve/correct it iteratively. The only thing that must work at this point is the following instantiation:

```python
from transformers import BrandNewBertModel, BrandNewBertConfig

model = BrandNewBertModel(BrandNewBertConfig())
```

This command will create a model with the default parameters defined in `BrandNewBertConfig()` and random weights, thereby making sure that the `init()` methods of all components work correctly.

**6. Write a conversion script**

The next step is to write a script that converts the checkpoint you used to debug *brand_new_bert* in the original repository into a checkpoint for the new 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch; rather, look through the already existing conversion scripts in 🤗 Transformers to find one used for a model similar to yours. Usually, copying an already existing script and adapting it to your case is enough. Don't hesitate to ask the Hugging Face team about this.

- If you are converting a model from TensorFlow to PyTorch, a good starting point is [this conversion script for BERT](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)
- If you are converting a model from PyTorch to PyTorch, [BART's conversion script can be useful](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)

In the following, we will quickly explain how PyTorch models store layer weights and how layer names are defined. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel`:

```python
from torch import nn


class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(10, 10)
        self.intermediate = nn.Linear(10, 10)
        self.layer_norm = nn.LayerNorm(10)
```

Now we can create an instance of this model definition, which will fill `dense`, `intermediate` and `layer_norm` with random weights. We can print the model to see its architecture:

```python
model = SimpleModel()

print(model)
```

This prints out the following:

```
SimpleModel(
  (dense): Linear(in_features=10, out_features=10, bias=True)
  (intermediate): Linear(in_features=10, out_features=10, bias=True)
  (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True)
)
```

As you can see, the layer names are defined by the names of the class attributes in PyTorch.
You can print out the weight values of a specific layer:

```python
print(model.dense.weight.data)
```

which, for example, yields:

```
tensor([[-0.0818,  0.2207, -0.0749, -0.0030,  0.0045, -0.1569, -0.1598,  0.0212,
         -0.2077,  0.2157],
        [ 0.1044,  0.0201,  0.0990,  0.2482,  0.3116,  0.2509,  0.2866, -0.2190,
          0.2166, -0.0212],
        [-0.2000,  0.1107, -0.1999, -0.3119,  0.1559,  0.0993,  0.1776, -0.1950,
         -0.1023, -0.0447],
        [-0.0888, -0.1092,  0.2281,  0.0336,  0.1817, -0.0115,  0.2096,  0.1415,
         -0.1876, -0.2467],
        [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465,
          0.2577,  0.0402],
        [ 0.1502,  0.2465,  0.2566,  0.0693,  0.2352, -0.0530,  0.1859, -0.0604,
          0.2132,  0.1680],
        [ 0.1733, -0.2407, -0.1721,  0.1484,  0.0358, -0.0633, -0.0721, -0.0090,
          0.2707, -0.2509],
        [-0.1173,  0.1561,  0.2945,  0.0595, -0.1996,  0.2988, -0.0802,  0.0407,
          0.1829, -0.1568],
        [-0.1164, -0.2228, -0.0403,  0.0428,  0.1339,  0.0047,  0.1967,  0.2923,
          0.0333, -0.0536],
        [-0.1492, -0.1616,  0.1057,  0.1950, -0.2807, -0.2710, -0.1586,  0.0739,
          0.2220,  0.2358]]).
```

In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.*:

```python
# retrieve matching layer weights, e.g. by
# recursive algorithm
layer_name = "dense"
pretrained_weight = array_of_dense_layer

model_pointer = getattr(model, "dense")

model_pointer.weight.data = torch.from_numpy(pretrained_weight)
```

While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add an `assert` for the shape and the name:

```python
assert (
    model_pointer.weight.shape == pretrained_weight.shape
), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"
```

Besides, you should print out the names of both weights to make sure they match, *e.g.*:

```python
logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")
```

If either the shape or the name doesn't match, you have probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation. An incorrect shape can be due to an error in the parameters of `BrandNewBertConfig()`. However, it can also be that PyTorch's implementation of a layer requires the weight matrix to be transposed first.

Finally, check that **all** required weights are initialized, and print out all checkpoint weights that were not used for initialization, to make sure the model is correctly converted. It is completely normal for conversion attempts to fail, either because of an error in `BrandNewBertConfig()`, a wrong architecture in the 🤗 Transformers implementation, or a bug in one of the `init()` functions. This step should be iterated until all the checkpoint weights are loaded correctly.

Once the checkpoint has been correctly loaded into 🤗 Transformers, you can save the model in a folder of your choice `/path/to/converted/checkpoint/folder`, which should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```

**7. Implement the forward pass**

Now that the pretrained weights are correctly loaded into the 🤗 Transformers implementation, you should make sure the forward pass is correctly implemented.
[Here](#3-4-run-a-pretrained-checkpoint-using-the-original-repository), you already created and ran a script that tests the forward pass of the model using the original repository. Now you should do the same with an analogous script that uses the 🤗 Transformers implementation instead of the original one. It should look something like this:

```python
model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]
output = model(input_ids).last_hidden_states
```

It is very likely that the 🤗 Transformers output is not exactly identical to the original one, especially on the first try. Don't be discouraged - it's normal! First of all, make sure the forward pass does not throw any errors. It often happens that the wrong dimensions or the wrong data types are used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help!

The final part is to make sure the 🤗 Transformers implementation works correctly by verifying that the outputs are equivalent to a precision of `1e-3`. First, check that `outputs.shape` is the same for the 🤗 Transformers implementation and the original one. Next, check that the output values are identical. This is definitely the hardest part; here are some common mistakes when the outputs are not identical:

- Some layers were not added, *e.g.* an *activation* layer was not added, or a residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used, because the original implementation uses an offset
- Dropout is active during the forward pass. To fix this, make sure *model.training = False* and that no dropout layer is falsely activated during the forward pass, *i.e.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)

The best way to fix the problem is usually to look at the forward passes of the original implementation and the 🤗 Transformers implementation side-by-side and check for any differences. Ideally, debugging and printing the intermediate outputs of both implementations at the exact same positions in the network should help you find the spot where the two frameworks diverge. As a first step, check that the hardcoded `input_ids` are identical in both scripts. From there, work your way up to the very last layer; at some point, you will notice a difference between the two implementations, which should point you to the bug. Once the same outputs are reached, verify them with `torch.allclose(original_output, output, atol=1e-3)`. If everything is fine at this point: congratulations! The remaining parts will be a walk in the park 😊.

**8. Add all necessary model tests**

At this point, you have successfully added your new model. However, it is very possible that the model does not yet fully comply with the required design. To make sure the implementation is consistent and fully compatible with 🤗 Transformers, it is necessary to implement tests. The Cookiecutter should have automatically added test files for your model, usually under `tests/test_modeling_brand_new_bert.py`.
Run this file to verify that the most common tests pass:

```bash
pytest tests/test_modeling_brand_new_bert.py
```

Having fixed all common tests, it is now crucial to ensure that your work is properly tested, so that:

- a) the community can easily understand your work by looking at the tests specific to *brand_new_bert*,
- b) future changes to your model will not break any important features of the model.

First, add integration tests. Those are essential because they do the same job as the debugging scripts you used earlier. A template for those tests already exists in the Cookiecutter under the name `BrandNewBertModelIntegrationTests`; you just have to fill it in. Once those tests are passing, run:

```bash
RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
```

<Tip>

In case you are on Windows, replace `RUN_SLOW=1` with `SET RUN_SLOW=1`

</Tip>

Second, all features that are special and useful to *brand_new_bert* should be tested in separate tests under `BrandNewBertModelTester`/`BrandNewBertModelTest`. People often forget these tests, but remember that they are useful in two ways:

- They help users understand your code better, by drawing attention to those new features
- Future developers and contributors can quickly test new implementations of the model by running those special-case tests.

**9. Implement the tokenizer**

At this point, we need a tokenizer for *brand_new_bert*. Usually, the tokenizer is equivalent to or very similar to other tokenizers in 🤗 Transformers. It is important to find the original tokenizer file and manage to load it into 🤗 Transformers.

To make sure the tokenizer works correctly, create a script in the original repository that takes a string as input and returns the `input_ids`. It could look something like this:

```python
input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = model.tokenize(input_str)
```

It might take some time, but keep digging into the original repository to find the correct tokenizer function. Sometimes you even have to rewrite parts of the original repository so that it outputs the `input_ids`. At that point, an analogous script is needed in 🤗 Transformers:

```python
from transformers import BrandNewBertTokenizer

input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."

tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")

input_ids = tokenizer(input_str).input_ids
```

Once both `input_ids` are equal, a test for the tokenizer should also be added. The *brand_new_bert* tokenizer test file should contain a couple of hard-coded integration tests.

**10. Run end-to-end integration tests**

Now that you have the tokenizer, you should add end-to-end integration tests for the whole workflow in `tests/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such tests should show, on a meaningful text-to-text sample, that the 🤗 Transformers implementation works as expected. *E.g.*, a source-to-target translation pair, an article-to-summary pair, or a question-answering pair.
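As a minimal sketch of what such an end-to-end test could look like - the checkpoint name, the task, and the expected output string are all hypothetical placeholders to be replaced with real values reproduced from the original implementation:

```python
import unittest

import torch

from transformers import BrandNewBertForConditionalGeneration, BrandNewBertTokenizer
from transformers.testing_utils import require_torch, slow


@require_torch
class BrandNewBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_summarization_end_to_end(self):
        # Hypothetical checkpoint name; use the real converted checkpoint here
        checkpoint = "author/brand_new_bert-base"
        tokenizer = BrandNewBertTokenizer.from_pretrained(checkpoint)
        model = BrandNewBertForConditionalGeneration.from_pretrained(checkpoint)
        model.eval()

        article = "A long input article that the original model is known to summarize well ..."
        input_ids = tokenizer(article, return_tensors="pt").input_ids

        with torch.no_grad():
            generated_ids = model.generate(input_ids, max_length=32)
        summary = tokenizer.decode(generated_ids[0], skip_special_tokens=True)

        # The expected string is hard-coded from the output of the original implementation
        self.assertEqual(summary, "The reference summary produced by the original model.")
```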
If none of the ported checkpoints has been fine-tuned on such downstream tasks, then the model tests are more than sufficient. As a final step, you should make sure the model is fully functional, and we also advise testing on GPU. It can happen that a `.to(self.device)` was forgotten, for example. If you have no access to a GPU, the Hugging Face team can take care of running those tests for you.

**11. Add a docstring**

You are almost at the finish line! The last thing left is a nice docstring and a doc page. The Cookiecutter should have provided a template called `docs/source/model_doc/brand_new_bert.rst` that you should fill out. The first thing a user will do before using your model is read the doc, so make the documentation clear and concise. It is also very useful for the community to add some *Tips* showing how the model should be used. Don't hesitate to ask the Hugging Face team regarding the docstrings.

Next, make sure the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and includes all necessary inputs and outputs. We have a detailed guide for writing documentation and docstrings.

**Code refactor**

Great, now that you have all the code for *brand_new_bert*, make sure the code style is correct:

```bash
make style
```

and that the code passes the quality checks:

```bash
make quality
```

Sometimes information is missing from a docstring, or some names are incorrect, which will make the checks above fail. Again: feel free to ask the Hugging Face team, we will be glad to help you. Lastly, it is always a good idea to do some refactoring once the code is known to work. You are now done with the coding, congratulations! 🎉 You are awesome! 😎

**12. Upload the model to the model hub**

In this final part, you should convert and upload the model, with all checkpoints, to the model hub, and add a model card for each uploaded checkpoint. Read our [Model sharing and uploading Page](model_sharing) to get familiar with the hub. In this part, you will usually work alongside Hugging Face to decide on a fitting name for each checkpoint and to get the required permissions to upload the model under the organization of the author of *brand_new_bert*. The `push_to_hub` method, present in all `transformers` models, is a quick and painless way to push your checkpoint to the hub:

```python
brand_new_bert.push_to_hub(
    repo_path_or_name="brand_new_bert",
    # Uncomment the following line to push to an organization
    # organization="<ORGANIZATION>",
    commit_message="Add model",
    use_temp_dir=True,
)
```

It is worth spending some time to create a fitting model card for each checkpoint. The model cards should highlight the specific characteristics of that checkpoint, *e.g.*, on which dataset the checkpoint was pretrained or fine-tuned, and on which downstream tasks the model works. It is also good practice to include some code showing how to use the model correctly.

**13. (Optional) Add a notebook**

It is very helpful to add a notebook that demonstrates in detail how *brand_new_bert* can be used for inference and/or fine-tuned on a specific task. This is not mandatory for your PR, but very useful for the community.

**14. Submit your PR**

The very last step! That is, merging your PR into main.
Usually, the Hugging Face team will already have helped you by this point, but it is worth taking some time to clean up the description and the comments in your code.

### Share your work!!

Now it's time to get some well-deserved credit from the community for your work! Completing a model addition is a major contribution to Transformers and the whole NLP community. Your code and the ported pre-trained models will certainly be used by hundreds, possibly thousands, of developers and researchers. Be proud of your achievement and share it with the whole community :)

**You have created another model that is super easy to use for everyone in the community! 🤯**
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Efficient Inference on Multiple GPUs

This document contains information on how to run inference efficiently on multiple GPUs.

<Tip>

Note: a multi-GPU setup can use most of the strategies described in the [single-GPU section](./perf_infer_gpu_one). You just need to be aware of a few simple techniques that can be used to get better results.

</Tip>

## `BetterTransformer` for faster inference

We recently integrated `BetterTransformer` for faster inference on multi-GPU for text, image and audio models. Check the documentation about this integration [here](https://huggingface.co/docs/optimum/bettertransformer/overview) for more details.
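As a minimal sketch of what using the integration can look like (it requires the `optimum` package to be installed, and the checkpoint below is just an example of a supported text model):

```python
from optimum.bettertransformer import BetterTransformer
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Any supported model works here; bert-base-uncased is only an example
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

# Convert the model to use PyTorch's native fastpath attention kernels
model = BetterTransformer.transform(model)

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
outputs = model(**inputs)
```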
<!-- Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# How to add a model to 🤗 Transformers?

The 🤗 Transformers library is often able to offer new models thanks to community contributors. But this is a challenging project that requires in-depth knowledge of the 🤗 Transformers library and of the model to implement. At Hugging Face, we are trying to empower more of the community to actively add models, and we put this guide together to walk you through the process of adding a PyTorch model (make sure [PyTorch is installed](https://pytorch.org/get-started/locally/)).

Along the way, you will:

- get insights into open-source best practices
- understand the design principles behind one of the most popular deep learning libraries
- learn how to efficiently test large models
- learn how to integrate Python utilities like `black`, `ruff`, and `make fix-copies` to ensure clean and readable code

A member of the Hugging Face team will provide support along the way, so you will never be alone. 🤗 ❤️

To get started, open a [New model addition](https://github.com/huggingface/transformers/issues/new?assignees=&labels=New+model&template=new-model-addition.yml) issue for the model you would like to see in 🤗 Transformers. If you are not especially set on contributing a specific model, you can check the [New model label](https://github.com/huggingface/transformers/labels/New%20model) for unclaimed model requests and work on one of those.

Once you have opened a new model request, the first step is to get familiar with 🤗 Transformers!

## General overview of 🤗 Transformers

First, you should get a general overview of 🤗 Transformers. 🤗 Transformers is a very opinionated library, so it is possible that you do not agree with some of the library's philosophy or design choices. From our experience, however, the fundamental design choices and philosophy of the library are crucial to efficiently scaling 🤗 Transformers while keeping maintenance costs at a reasonable level.

A good starting point to better understand the library is to read the [documentation of our philosophy](philosophy). As a result of our way of working, there are some choices we try to apply to all models:

- Composition is generally favored over abstraction
- Duplicating code is not always bad, if it strongly improves the readability or accessibility of a model
- Model files should be as self-contained as possible, so that when you read the code of a specific model, you ideally only have to look at the respective `modeling_....py` file.

In our opinion, the code of this library is not just a means to provide a product, *e.g.*, the ability to use BERT for inference, but also the very product itself.
### Overview of models

To successfully add a model, it is important to understand the interaction between your model and its config, [`PreTrainedModel`], and [`PretrainedConfig`]. For exemplary purposes, we will call the model to be added to 🤗 Transformers "BrandNewBert".

Let's take a look:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/>

As you can see, we do make use of inheritance in 🤗 Transformers, but we keep the level of abstraction to an absolute minimum. There are never more than two levels of abstraction for any model in the library. `BrandNewBertModel` inherits from `BrandNewBertPreTrainedModel`, which in turn inherits from [`PreTrainedModel`], and that's it. As a general rule, we want a new model to depend only on [`PreTrainedModel`]. The important functionalities automatically provided to every new model are [`~PreTrainedModel.from_pretrained`] and [`~PreTrainedModel.save_pretrained`], which are used for serialization and deserialization. All the other important functionalities, such as `BrandNewBertModel.forward`, should be fully defined in the new `modeling_brand_new_bert.py` script.

Next, we want a model with a specific head layer (such as `BrandNewBertForMaskedLM`) not to inherit from `BrandNewBertModel`, but rather to use `BrandNewBertModel` as a component that is called in its forward pass, in order to keep the level of abstraction low. Every new model requires a configuration class, called `BrandNewBertConfig`. This configuration is always stored as an attribute of [`PreTrainedModel`], and can thus be accessed via the `config` attribute in all classes inheriting from `BrandNewBertPreTrainedModel`:

```python
model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")
model.config  # model has access to its config
```

Similar to the model, the configuration inherits basic serialization and deserialization functionalities from [`PretrainedConfig`]. Note that the configuration and the model are always serialized into two different formats - the model to a *pytorch_model.bin* file and the configuration to a *config.json* file. Calling [`~PreTrainedModel.save_pretrained`] will automatically call [`~PretrainedConfig.save_pretrained`] as well, so that both the model and the configuration are saved.

### Code style

When coding your new model, keep in mind that Transformers is an opinionated library and we have a few quirks of our own regarding how code should be written :-)

1. The forward pass of your model should be fully written in the modeling file, while being fully independent of other models in the library. If you want to reuse a block from another model, copy the code and paste it with a `# Copied from` comment on top (see [here](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) for a good example and [here](pr_checks#check-copies) for more documentation on copies).
2. The code should be fully understandable, even by non-native English speakers. This means you should pick descriptive variable names and avoid abbreviations. As an example, `activation` is preferred over `act`. One-letter variable names are strongly discouraged unless they are indices in a for loop.
3. More generally, we prefer longer, explicit code over short, magical code.
4. Avoid subclassing `nn.Sequential` in PyTorch; subclass `nn.Module` and write the forward pass instead, so that anyone using your code can quickly debug it by adding print statements or breakpoints.
5. Your function signatures should be type-annotated. For the rest, good variable names are often more readable and understandable than type annotations.

### Overview of tokenizers

Not quite ready yet :-( This section will be added soon!

## Step-by-step recipe to add a model to 🤗 Transformers

Everyone has different preferences when it comes to porting a model, so it can be very helpful to look at summaries of how other contributors added models to 🤗 Transformers. Here is a list of community blog posts on porting a model to 🤗 Transformers:

1. [Porting a GPT2 model](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) by [Thomas](https://huggingface.co/thomwolf)
2. [Porting a WMT19 MT model](https://huggingface.co/blog/porting-fsmt) by [Stas](https://huggingface.co/stas)

From experience, we can tell you that the most important things to keep in mind when adding a model are:

- Don't reinvent the wheel! Most of the code you will add for the new 🤗 Transformers model already exists somewhere in 🤗 Transformers. Take some time to find similar, already existing models and tokenizers you can copy from. [grep](https://www.gnu.org/software/grep/) and [rg](https://github.com/BurntSushi/ripgrep) are your friends. Note that your model's tokenizer may be based on one model implementation while your model's modeling code is based on another. *E.g.*, FSMT's modeling code is based on BART, while FSMT's tokenizer code is based on XLM.
- It's more of an engineering challenge than a scientific one. You should spend more time creating an efficient debugging environment than trying to understand all theoretical aspects of the model's paper.
- Ask for help when you're stuck! Models are the core component of 🤗 Transformers, and at Hugging Face we are more than happy to help you at every step of adding your model. Don't hesitate to reach out if you notice you are not making progress.

In the following, we try to give you a general recipe that we found most useful when porting a model to 🤗 Transformers. The following list is a summary of everything that has to be done to add a model, and can be used as a To-Do list:

- ☐ (Optional) Understood the model's theoretical aspects
- ☐ Prepared the 🤗 Transformers dev environment
- ☐ Set up a debugging environment of the original repository
- ☐ Created a script that successfully runs the `forward()` pass using the original repository and checkpoint
- ☐ Successfully added the model skeleton to 🤗 Transformers
- ☐ Successfully converted the original checkpoint to a 🤗 Transformers checkpoint
- ☐ Successfully ran the `forward()` pass in 🤗 Transformers, giving an output identical to the original checkpoint's
- ☐ Finished the model tests in 🤗 Transformers
- ☐ Successfully added the tokenizer in 🤗 Transformers
- ☐ Run the end-to-end integration tests
- ☐ Finished the docs
- ☐ Uploaded the model weights to the Hub
- ☐ Submitted the pull request
- ☐ (Optional) Added a demo notebook

To begin with, we usually recommend getting a good theoretical understanding of `BrandNewBert`. However, if you prefer to understand the theoretical aspects of the model *on the job*, it is totally fine to dive directly into `BrandNewBert`'s code base. This option might suit you better if your engineering skills are stronger than your theoretical ones, if you have trouble understanding the `BrandNewBert` paper, or if you just enjoy programming much more than reading scientific papers.

### 1. (Optional) Theoretical aspects of BrandNewBert

If a BrandNewBert paper exists, you should take some time to read through it. Some sections of the paper may be difficult to understand. If that's the case, don't worry! The goal is not to gain a deep theoretical understanding of the paper, but to extract the information required to effectively re-implement the model in 🤗 Transformers. That said, you don't have to spend too much time on the theoretical aspects; focus on the practical ones instead, namely:

- What type of model is *brand_new_bert*? A BERT-like encoder-only model? A GPT2-like decoder-only model? A BART-like encoder-decoder model? Take a look at [model_summary](model_summary) if you want to learn more about the differences between these.
- What are the applications of *brand_new_bert*? Text classification? Text generation? Seq2Seq tasks, *e.g.*, summarization?
- What is the novel feature of the model that makes it different from BERT/GPT-2/BART?
- Which of the already existing [🤗 Transformers models](https://huggingface.co/transformers/#contents) is most similar to *brand_new_bert*?
- What type of tokenizer is used? A SentencePiece tokenizer? A WordPiece tokenizer? Is it the same tokenizer used by BERT or BART?

After you feel you have a good overview of the model's architecture, you can write to the Hugging Face team with any questions you might have. This might include questions regarding the model's architecture, its attention layer, etc. We will be more than happy to help you.

### 2. Next prepare your environment

1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your `transformers` fork to your local disk, and add the base repository as a remote:

```bash
git clone https://github.com/[your Github handle]/transformers.git
cd transformers
git remote add upstream https://github.com/huggingface/transformers.git
```

3. Set up a development environment by running the following commands:

```bash
python -m venv .env
source .env/bin/activate
pip install -e ".[dev]"
```

Depending on your OS, and since the number of optional dependencies of Transformers is growing, this command might fail. If that's the case, make sure you install the Deep Learning framework you are working with (PyTorch, TensorFlow and/or Flax) and do:

```bash
pip install -e ".[quality]"
```

which should be enough for most use cases. You can then return to the parent directory:

```bash
cd ..
```

4. We recommend adding the PyTorch version of *brand_new_bert* to Transformers. To install PyTorch, please follow the instructions at https://pytorch.org/get-started/locally/.

**Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient.
5. To port *brand_new_bert*, you will also need access to its original repository:

```bash
git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
cd brand_new_bert
pip install -e .
```

You have now set up a development environment to port *brand_new_bert* to 🤗 Transformers.

### 3.-4. Run a pretrained checkpoint using the original repository

At first, you will work on the original *brand_new_bert* repository. Often, the original implementation is very "researchy", meaning that documentation might be lacking and the code can be difficult to understand. But this should be exactly your motivation to reimplement *brand_new_bert*. At Hugging Face, one of our main goals is to take a working model and rewrite it to make it as **accessible, user-friendly, and beautiful** as possible. This is the number-one motivation for re-implementing models into 🤗 Transformers - trying to make complex new NLP technology accessible to **everybody**.

You should start by diving into the original repository. Successfully running the official pretrained model in the original repository is often the **most difficult** step. From our experience, it is very important to spend some time getting familiar with the original code base. You need to figure out the following:

- Where to find the pretrained weights?
- How to load the pretrained weights into the corresponding model?
- How to run the tokenizer independently from the model?
- Trace one forward pass, so that you know which classes and functions are required for a simple forward pass. Usually, you only have to reimplement those functions.
- Be able to locate the important components of the model: Where is the model's class? Are there model sub-classes, *e.g.*, EncoderModel, DecoderModel? Where is the self-attention layer? Are there multiple different attention layers, *e.g.*, *self-attention*, *cross-attention*...?
- How can you debug the model in the original environment of the repository? Do you have to add *print* statements, can you work with an interactive debugger like *ipdb*, or should you use an efficient IDE to debug the model, like PyCharm?

It is important that before you start the porting process, you can debug code in the original repository **efficiently**! Also, remember that you are working with an open-source library, so do not hesitate to open an issue, or even a pull request, in the original repository. The maintainers of that repository are most likely very happy about someone looking into their code!

At this point, it is really up to you which debugging environment and strategy you prefer for debugging the original model. It is very important to be able to debug the code of the original repository first, and we strongly advise against setting up a GPU environment. First, work on CPU and verify the model once it has been successfully ported to 🤗 Transformers. At the very end, verify that the model also works as expected on GPU.

In general, there are two possible debugging environments for running the original model:

- [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb)
- Local Python scripts

Jupyter notebooks have the advantage that they can be run cell by cell, which helps to better split logical components from one another and speeds up debugging cycles, as intermediate results can be stored. Also, notebooks are often easy to share with other contributors, which can be very helpful if you want to ask the Hugging Face team for help. If you are familiar with Jupyter notebooks, we strongly recommend working with them.

For every code base, a good first step is always to load a **small** pretrained checkpoint and reproduce a single forward pass using a dummy integer vector of input IDs. In pseudocode, such a script could look like this:

```python
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = [0, 4, 5, 2, 3, 7, 9]  # vector of input ids
original_output = model.predict(input_ids)
```

Regarding the debugging strategy, there are generally a couple to choose from:

- Decompose the original model into many small testable components and run a forward pass on each of them for verification
- Decompose the original model only into the original *tokenizer* and the original *model*, run a forward pass on those, and use intermediate print statements or breakpoints for verification

Again, it is up to you which strategy to choose. One or the other is often advantageous depending on the original code base. If the original code base allows you to decompose the model into smaller sub-components, *e.g.*, if it can easily be run in eager mode, it is usually worth the effort to do so. There are some important advantages to taking the more difficult road from the beginning:

- at a later stage, when comparing the original model to the Hugging Face implementation, you can automatically verify that each component individually matches the corresponding component of the 🤗 Transformers implementation, instead of relying on visual comparison
- it helps you decompose the big problem of porting a model into the smaller problems of porting individual components, giving your work more structure
- separating the model into logical, meaningful components helps you get a better overview of the model's design and thus a better understanding of the model
- at a later stage, those component-by-component tests help you ensure that no regression occurs as you continue changing your code

[Lysandre's](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) integration checks for ELECTRA give a nice example of how this can be done.

However, if the original code base is very complex and only allows intermediate components to be run in compiled mode, it might be too time-consuming or even impossible to decompose the model into smaller testable sub-components. A good example is the [MeshTensorFlow library for T5](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow), which is very complex and does not offer a simple way of decomposing the model into its sub-components. For such libraries, you usually have to rely on verifying print statements.

No matter which strategy you choose, the recommended procedure is usually the same: start debugging from the first layers and finish with the last layers. It is recommended that you retrieve the outputs of the following layers, in the following order:

1. Retrieve the input IDs passed to the model
2. Retrieve the word embeddings
3. Retrieve the input of the first Transformer layer
4. Retrieve the output of the first Transformer layer
5. Retrieve the output of the following n - 1 Transformer layers
6. Retrieve the output of the whole BrandNewBert Model

The input IDs should consist of an array of integers, *e.g.*, `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`.

The outputs of the following layers often consist of multi-dimensional float arrays, which can look like this:

```
[[
 [-0.1465, -0.6501,  0.1993,  ...,  0.1451,  0.3430,  0.6024],
 [-0.4417, -0.5920,  0.3450,  ..., -0.3062,  0.6182,  0.7132],
 [-0.5009, -0.7122,  0.4548,  ..., -0.3662,  0.6091,  0.7648],
 ...,
 [-0.5613, -0.6332,  0.4324,  ..., -0.3792,  0.7372,  0.9288],
 [-0.5416, -0.6345,  0.4180,  ..., -0.3564,  0.6992,  0.9191],
 [-0.5334, -0.6403,  0.4271,  ..., -0.3339,  0.6533,  0.8694]]],
```

Every model added to 🤗 Transformers is expected to pass a couple of integration tests, meaning that the original model and the version reimplemented in 🤗 Transformers must give exactly the same outputs up to a precision of 0.001! Since it is normal for the exact same model written in different library frameworks to return slightly different outputs, we accept an error tolerance of 1e-3 (0.001). It is not enough for the model to return *nearly* the same output; the outputs must be almost identical. Therefore, you will certainly compare the intermediate outputs of the 🤗 Transformers version multiple times against the intermediate outputs of the original implementation of *brand_new_bert*, in which case an **efficient** debugging environment for the original repository is absolutely important. Here is some advice to make your debugging environment as efficient as possible:

- Find the best way of debugging intermediate results. Is the original repository written in PyTorch? Then you should probably take the time to write a longer script that decomposes the original model into smaller sub-components so that intermediate values can be retrieved. Is the original repository written in Tensorflow 1? Then you might have to rely on TensorFlow print operations like [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) to output intermediate values. Is the original repository written in Jax? Then make sure the model is **not jitted** when running the forward pass, *e.g.*, check out [this link](https://github.com/google/jax/issues/196).
- Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debugging cycle becomes. It is not efficient if your pretrained model is so big that the forward pass takes more than 10 seconds. In case only very large checkpoints are available, it might make more sense to create a dummy model in the new environment with randomly initialized weights and save those weights for comparison with the 🤗 Transformers version of your model.
- Make sure you are using the easiest way of calling a forward pass in the original repository. Ideally, you want to find the function in the original repository that calls **only** a single forward pass; it is often called `predict`, `evaluate`, `forward` or `__call__`. You don't want to debug a function that calls `forward` multiple times, *e.g.*, to generate text, like `autoregressive_sample` or `generate`.
- Try to separate the tokenization from the model's *forward* pass. If the original repository shows examples where you have to input a string, find out where in the forward call the string input is changed into input IDs, and start from this point. This might mean you have to either write a small script yourself or change the original code so that you can directly input the IDs instead of an input string.
- Make sure the model in your debugging setup is **not** in training mode, which often causes the model to yield random outputs due to the multiple dropout layers in the model. Make sure the forward pass in your debugging environment is **deterministic**, so that the dropout layers are not used. Or use *transformers.utils.set_seed* if the old and new implementations are in the same framework.

The following section gives you more specific details/tips on how you can do this for *brand_new_bert*.

### 5.-14. Port BrandNewBert to 🤗 Transformers

Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers fork:

```bash
cd transformers
```

In the special case that you are adding a model whose architecture exactly matches the architecture of an existing model, you only have to add a conversion script, as described in [this section](#write-a-conversion-script). In this case, you can just re-use the whole model architecture of the already existing model.

Otherwise, let's start generating a new model. We recommend using the following script to add a model starting from an existing model:

```bash
transformers-cli add-new-model-like
```

You will be prompted with a questionnaire to fill in the basic information of your model.

**Open a Pull Request on the main huggingface/transformers repo**

Before starting to adapt the automatically generated code, now is the time to open a "Work in progress (WIP)" pull request, *e.g.*, "[WIP] Add *brand_new_bert*", in 🤗 Transformers, so that you and the Hugging Face team can work side-by-side on integrating the model into 🤗 Transformers.

You should do the following:

1. Create a branch with a descriptive name from your main branch

```bash
git checkout -b add_brand_new_bert
```

2. Commit the automatically generated code:

```bash
git add .
git commit
```

3. Fetch and rebase on the current main

```bash
git fetch upstream
git rebase upstream/main
```

4. Push the changes to your account using:

```bash
git push -u origin a-descriptive-name-for-my-changes
```

5. Once you are satisfied, go to the webpage of your fork on GitHub and click on "Pull request". Make sure to add the GitHub handles of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified of future changes as well.
6. Change the PR into a draft by clicking on "Convert to draft" on the right side of the GitHub pull request web page.

In the following, whenever you make progress, don't forget to commit your work and push it, so that it shows up in the pull request. Additionally, make sure to regularly pull in the latest changes from main:

```bash
git fetch upstream
git merge upstream/main
```

In general, all questions you have regarding the model or your implementation should be asked in your Pull Request (PR) and discussed/solved there. This way, the Hugging Face team will always be notified when you commit new code or when you have a question. It is very helpful to point the Hugging Face team to your code, so that the problem or question can be easily understood. To do so, go to the "Files changed" tab, where you can see all of your changes, go to the line you want to ask a question about, and click on the "+" symbol to add a comment. Whenever a question or problem has been solved, you can click on the "Resolve" button of the created comment. In the same way, the Hugging Face team will open comments when reviewing your code. We recommend asking most questions on GitHub, in your PR. For very general questions that are not very useful to the public, feel free to contact the Hugging Face team via Slack or email.

**5. Adapt the generated model code for "brand_new_bert"**

At first, focus on the model itself and don't care about the tokenizer. The relevant code should be found in the generated files `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`.

Now you can finally start coding :smile:. The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will either have the same architecture as BERT if it's an encoder-only model, or the same architecture as BART if it's an encoder-decoder model. At this point, remind yourself of what you learned at the beginning about the theoretical aspects of the model: *How is the model I'm implementing different from BERT or BART?*. Implement those changes, which often means changing the *self-attention* layer, the order of the normalization layers, and so on. Again, it is often useful to look at the similar architectures of already existing models in Transformers to get a better feeling for how your model should be implemented.

**Note** that at this point, the code doesn't have to be fully correct or clean. Rather, it is advised to add a first *unclean*, copy-pasted version of the original code to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel that all the necessary code has been added. From our experience, it is much more efficient to quickly add a first version of the required code and improve/correct it iteratively with the conversion script described in the next section. The only thing that has to work at this point is that you can instantiate the 🤗 Transformers implementation of "brand_new_bert", *i.e.*, the following command should work:

```python
from transformers import BrandNewBertModel, BrandNewBertConfig

model = BrandNewBertModel(BrandNewBertConfig())
```

The above command creates a model according to the default parameters defined in `BrandNewBertConfig()`, with random weights, and thereby makes sure that the `init()` methods of all components work.

Note that all random initialization should happen in the `_init_weights` method of your `BrandnewBertPreTrainedModel` class. It should initialize all leaf modules depending on the variables of the config. Here is an example with BERT's `_init_weights` method:

```py
def _init_weights(self, module):
    """Initialize the weights"""
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)
```

You can have more custom schemes if some modules need a special initialization. For instance, in `Wav2Vec2ForPreTraining`, the last two linear layers need to have the initialization of the regular PyTorch `nn.Linear`, but all the other layers should use an initialization as above. This is coded like this:

```py
def _init_weights(self, module):
    """Initialize the weights"""
    if isinstance(module, Wav2Vec2ForPreTraining):
        module.project_hid.reset_parameters()
        module.project_q.reset_parameters()
        module.project_hid._is_hf_initialized = True
        module.project_q._is_hf_initialized = True
    elif isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if module.bias is not None:
            module.bias.data.zero_()
```

The `_is_hf_initialized` flag is used internally to make sure that a submodule is only initialized once. By setting it to `True` for `module.project_q` and `module.project_hid`, we make sure the custom initialization done afterwards is not overridden, *i.e.*, the `_init_weights` function won't be applied to them later.
**6. Write a conversion script**

Next, you should write a conversion script that converts the checkpoint you used to debug *brand_new_bert* in the original repository into a checkpoint compatible with your newly created 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch; rather, look through the already existing conversion scripts in 🤗 Transformers for one that converted a similar model written in the same framework. Usually, copying an already existing conversion script and slightly adapting it to your use case is enough. Don't hesitate to ask the Hugging Face team to point you to an existing conversion script for a model similar to yours.

- If you are porting a model from TensorFlow to PyTorch, a good starting point might be BERT's conversion script [here](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)
- If you are porting a model from PyTorch to PyTorch, a good starting point might be BART's conversion script [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py)

In the following, we will quickly explain how PyTorch models store layer weights and define layer names. In PyTorch, the name of a layer is defined by the name of the class attribute you give the layer. Let's define a dummy model in PyTorch, called `SimpleModel`:

```python
from torch import nn


class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(10, 10)
        self.intermediate = nn.Linear(10, 10)
        self.layer_norm = nn.LayerNorm(10)
```

Now we can create an instance of this model definition, which will fill all the weights of `dense`, `intermediate`, and `layer_norm` with random weights. We can print the model to see its architecture:

```python
model = SimpleModel()

print(model)
```

This will print out the following:

```
SimpleModel(
  (dense): Linear(in_features=10, out_features=10, bias=True)
  (intermediate): Linear(in_features=10, out_features=10, bias=True)
  (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True)
)
```

We can see that the layer names are defined by the names of the class attributes in PyTorch. You can print out the weight values of a specific layer:

```python
print(model.dense.weight.data)
```

to see that the weights were randomly initialized:

```
tensor([[-0.0818,  0.2207, -0.0749, -0.0030,  0.0045, -0.1569, -0.1598,  0.0212,
         -0.2077,  0.2157],
        [ 0.1044,  0.0201,  0.0990,  0.2482,  0.3116,  0.2509,  0.2866, -0.2190,
          0.2166, -0.0212],
        [-0.2000,  0.1107, -0.1999, -0.3119,  0.1559,  0.0993,  0.1776, -0.1950,
         -0.1023, -0.0447],
        [-0.0888, -0.1092,  0.2281,  0.0336,  0.1817, -0.0115,  0.2096,  0.1415,
         -0.1876, -0.2467],
        [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465,
          0.2577,  0.0402],
        [ 0.1502,  0.2465,  0.2566,  0.0693,  0.2352, -0.0530,  0.1859, -0.0604,
          0.2132,  0.1680],
        [ 0.1733, -0.2407, -0.1721,  0.1484,  0.0358, -0.0633, -0.0721, -0.0090,
          0.2707, -0.2509],
        [-0.1173,  0.1561,  0.2945,  0.0595, -0.1996,  0.2988, -0.0802,  0.0407,
          0.1829, -0.1568],
        [-0.1164, -0.2228, -0.0403,  0.0428,  0.1339,  0.0047,  0.1967,  0.2923,
          0.0333, -0.0536],
        [-0.1492, -0.1616,  0.1057,  0.1950, -0.2807, -0.2710, -0.1586,  0.0739,
          0.2220,  0.2358]]).
```

In the conversion script, you should fill those randomly initialized weights with the exact weights of the corresponding layer in the checkpoint. *E.g.*:
by # recursive algorithm layer_name = "dense" pretrained_weight = array_of_dense_layer model_pointer = getattr(model, "dense") model_pointer.weight.data = torch.from_numpy(pretrained_weight) ``` PyTorchモデルの各ランダム初期化された重みと対応する事前学習済みチェックポイントの重みが **形状と名前の両方**で正確に一致することを確認する必要があります。 これを行うために、形状に対するassertステートメントを追加し、チェックポイントの重みの名前を出力することが **必要不可欠**です。例えば、次のようなステートメントを追加する必要があります: ```python assert ( model_pointer.weight.shape == pretrained_weight.shape ), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" ``` また、両方の重みの名前を印刷して、一致していることを確認する必要があります。例えば、次のようにします: ```python logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") ``` もし形状または名前のいずれかが一致しない場合、おそらく誤って🤗 Transformersの実装に初期化されたレイヤーに間違ったチェックポイントの重みを割り当ててしまった可能性があります。 誤った形状は、おそらく`BrandNewBertConfig()`での設定パラメーターが、変換したいチェックポイントで使用されたものと正確に一致しないためです。 ただし、PyTorchのレイヤーの実装によっては、重みを事前に転置する必要がある場合もあります。 最後に、**すべて**の必要な重みが初期化されていることを確認し、初期化に使用されなかったすべてのチェックポイントの重みを表示して、モデルが正しく変換されていることを確認してください。 変換トライアルが誤った形状ステートメントまたは誤った名前割り当てで失敗するのは完全に正常です。 これはおそらく、`BrandNewBertConfig()`で誤ったパラメーターを使用したか、🤗 Transformersの実装に誤ったアーキテクチャがあるか、🤗 Transformersの実装の1つのコンポーネントの`init()`関数にバグがあるか、チェックポイントの重みの1つを転置する必要があるためです。 このステップは、以前のステップと繰り返すべきです。すべてのチェックポイントの重みが正しく🤗 Transformersモデルに読み込まれるまで繰り返すべきです。 🤗 Transformers実装に正しくチェックポイントを読み込んだ後、選択したフォルダーにモデルを保存できます `/path/to/converted/checkpoint/folder`。このフォルダには`pytorch_model.bin`ファイルと`config.json`ファイルの両方が含まれるはずです。 ```python model.save_pretrained("/path/to/converted/checkpoint/folder") ``` **7. 順伝播(forward pass)の実装** 🤗 Transformers実装で事前学習済みの重みを正しく読み込んだ後、順伝播が正しく実装されていることを確認する必要があります。[元のリポジトリを理解する](#3-4-run-a-pretrained-checkpoint-using-the-original-repository)で、元のリポジトリを使用してモデルの順伝播を実行するスクリプトをすでに作成しました。今度は、元のリポジトリの代わりに🤗 Transformers実装を使用して類似のスクリプトを作成する必要があります。以下のようになります: ```python model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] output = model(input_ids).last_hidden_states ``` 🤗 Transformersの実装と元のモデルの実装が最初の実行で完全に同じ出力を提供しないか、 フォワードパスでエラーが発生する可能性が非常に高いです。失望しないでください - これは予想されていることです! 
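この検証を機械的に行うために、両実装の出力を突き合わせる最小のスケッチを先に示しておきます(`original_output` の取得方法や許容誤差 `1e-3` は説明用の仮定であり、`BrandNewBertModel` はこのガイドで使っている仮想のモデル名です):

```python
import torch

from transformers import BrandNewBertModel  # このガイドの仮想モデル名

# 仮定:original_output は元リポジトリの実装から得た出力(ここではダミー値)
original_output = torch.randn(1, 9, 32)

model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
model.eval()  # フォワードパス中にドロップアウトが有効にならないようにする

input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])
with torch.no_grad():
    output = model(input_ids).last_hidden_state

# まず形状、次に値を比較する
print("shapes:", original_output.shape, output.shape)
print("max abs diff:", (original_output - output).abs().max().item())
assert torch.allclose(original_output, output, atol=1e-3)
```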
まず、フォワードパスがエラーをスローしないことを確認する必要があります。 間違った次元が使用され、*次元の不一致*エラーや、誤ったデータ型オブジェクトが使用されることがよくあります。 例えば、`torch.long`ではなく`torch.float32`が使用されます。特定のエラーを解決できない場合は、 Hugging Faceチームに助けを求めることを躊躇しないでください。 🤗 Transformers実装が正しく機能することを確認する最終的な部分は、出力が`1e-3`の精度で同等であることを確認することです。 まず、出力の形状が同一であること、つまりスクリプトの🤗 Transformers実装と元の実装の両方で`outputs.shape`が同じ値を生成する必要があります。 次に、出力値が同一であることを確認する必要があります。 これは新しいモデルを追加する際の最も難しい部分の1つです。 出力が同一でない理由の一般的な間違いは以下の通りです。 - 一部のレイヤーが追加されていない、つまり*活性化*レイヤーが追加されていないか、リザバル接続が忘れられている - 単語埋め込み行列が結ばれていない - オリジナルの実装がオフセットを使用しているため、誤った位置埋め込みが使用されている - フォワードパス中にドロップアウトが適用されています。これを修正するには、*model.trainingがFalse*であることを確認し、フォワードパス中に誤ってドロップアウトレイヤーがアクティブ化されないようにします。 *つまり* [PyTorchのfunctional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)に*model.training*を渡します。 問題を修正する最良の方法は、通常、元の実装と🤗 Transformers実装のフォワードパスを並べて表示し、違いがあるかどうかを確認することです。 理想的には、フォワードパスの両方の実装の中間出力をデバッグ/プリントアウトして、🤗 Transformers実装が元の実装と異なる出力を示すネットワーク内の正確な位置を見つけることができます。 最初に、両方のスクリプトのハードコーディングされた`input_ids`が同一であることを確認します。 次に、`input_ids`の最初の変換(通常、単語埋め込み)の出力が同一であることを確認します。 その後、ネットワークの最後のレイヤーまで作業を進めます。 いずれかの時点で、2つの実装間で違いがあることに気付くはずで、それにより🤗 Transformers実装のバグの場所が特定されます。 経験上、元の実装と🤗 Transformers実装のフォワードパスの同じ位置に多くのプリントステートメントを追加し、 中間プレゼンテーションで同じ値を示すプリントステートメントを段階的に削除するのがシンプルかつ効果的な方法です。 両方の実装が同じ出力を生成することに自信を持っている場合、`torch.allclose(original_output, output, atol=1e-3)`を使用して出力を確認すると、最も難しい部分が完了します! おめでとうございます - 完了する作業は簡単なものになるはずです 😊。 **8. 必要なすべてのモデルテストを追加** この時点で、新しいモデルが正常に追加されました。 ただし、モデルがまだ必要な設計に完全に準拠していない可能性が非常に高いです。 🤗 Transformersと完全に互換性があることを確認するために、すべての一般的なテストがパスする必要があります。 Cookiecutterはおそらくモデル用のテストファイルを自動的に追加しているはずで、おそらく同じディレクトリに`tests/models/brand_new_bert/test_modeling_brand_new_bert.py`として存在します。 このテストファイルを実行して、すべての一般的なテストがパスすることを確認してください: ```bash pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py ``` すべての一般的なテストを修正したら、今度は実行したすべての素晴らしい作業が適切にテストされていることを確認することが非常に重要です。これにより、 - a) コミュニティは*brand_new_bert*の特定のテストを見ることで、あなたの作業を簡単に理解できます。 - b) モデルへの将来の変更がモデルの重要な機能を壊さないようにすることができます。 まず、統合テストを追加する必要があります。これらの統合テストは、基本的にはデバッグスクリプトと同じことを行います。これらのモデルテストのテンプレートはCookiecutterによって既に追加されており、「BrandNewBertModelIntegrationTests」と呼ばれています。このテストを記入するだけです。これらのテストが合格していることを確認するには、次のコマンドを実行します。 ```bash RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` <Tip> Windowsを使用している場合、`RUN_SLOW=1`を`SET RUN_SLOW=1`に置き換えてください。 </Tip> 次に、*brand_new_bert*に特有のすべての特徴は、別個のテスト内で追加されるべきです。 `BrandNewBertModelTester`/`BrandNewBertModelTest`の下に。この部分はよく忘れられますが、2つの点で非常に役立ちます: - モデルの追加中に獲得した知識をコミュニティに伝え、*brand_new_bert*の特別な機能がどのように動作するかを示すことによって、知識の共有を支援します。 - 将来の貢献者は、これらの特別なテストを実行することでモデルへの変更を迅速にテストできます。 **9. トークナイザの実装** 次に、*brand_new_bert*のトークナイザを追加する必要があります。通常、トークナイザは🤗 Transformersの既存のトークナイザと同等か非常に似ています。 トークナイザが正しく動作することを確認するためには、まず、元のリポジトリ内で文字列を入力し、`input_ids`を返すスクリプトを作成することをお勧めします。 このスクリプトは、次のように見えるかもしれません(疑似コードで示します): ```python input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") input_ids = model.tokenize(input_str) ``` オリジナルのリポジトリを詳しく調査し、正しいトークナイザの関数を見つける必要があるかもしれません。 または、オリジナルのリポジトリのクローンを変更して、`input_ids`だけを出力するようにする必要があるかもしれません。 オリジナルのリポジトリを使用した機能的なトークナイゼーションスクリプトを作成した後、 🤗 Transformers向けの類似したスクリプトを作成する必要があります。 以下のように見えるべきです: ```python from transformers import BrandNewBertTokenizer input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." 
tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")
input_ids = tokenizer(input_str).input_ids
```

`input_ids`が同じ値を生成した場合、最終ステップとしてトークナイザのテストファイルも追加するべきです。

*brand_new_bert*のモデリングテストファイルと同様に、*brand_new_bert*のトークナイゼーションテストファイルには、いくつかのハードコードされた統合テストが含まれるべきです。

**10. エンドツーエンド統合テストの実行**

トークナイザを追加した後、`🤗 Transformers`内の`tests/models/brand_new_bert/test_modeling_brand_new_bert.py`に、モデルとトークナイザの両方を使用するいくつかのエンドツーエンド統合テストも追加する必要があります。このようなテストは、意味のあるテキスト対テキストのサンプルを使って、🤗 Transformersの実装が期待どおりに機能することを示すべきです。有用なテキスト対テキストのサンプルには、ソースからターゲットへの翻訳ペア、記事から要約へのペア、質問から回答へのペアなどが含まれます。ポートされたチェックポイントがダウンストリームタスクでファインチューニングされていない場合は、モデルのテストに依存するだけで十分です。

モデルが完全に機能していることを確認するために、すべてのテストをGPU上でも実行することをお勧めします。モデルの内部テンソルへの`.to(self.device)`ステートメントを追加し忘れることがあり、そのようなテストで初めてエラーとして表示されるためです。GPUにアクセスできない場合は、Hugging Faceチームが代わりにこれらのテストを実行できます。

**11. ドキュメントの追加**

これで、*brand_new_bert*の必要なすべての機能が追加されました - ほぼ完了です!残りは、良いドキュメントとドキュメントページを追加することだけです。Cookiecutterが`docs/source/model_doc/brand_new_bert.md`というテンプレートファイルを追加しているはずなので、これを記入してください。モデルのユーザーは通常、モデルを使用する前にまずこのページを見ます。したがって、ドキュメンテーションは理解しやすく簡潔である必要があります。モデルの使用方法を示すいくつかの*Tips*を追加することは、コミュニティにとって非常に役立ちます。ドキュメンテーションに関しては、Hugging Faceチームに問い合わせることをためらわないでください。

次に、`src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`に追加されたドキュメンテーション文字列が正しいこと、およびすべての必要な入力および出力を含んでいることを確認してください。ドキュメンテーションの書き方とドキュメンテーション文字列のフォーマットについての詳細なガイドは[こちら](writing-documentation)にあります。ドキュメンテーションは通常、コミュニティとモデルの最初の接点であるため、コードと同じくらい注意深く扱うべきであることを常に念頭に置いてください。

**コードのリファクタリング**

素晴らしい、これで*brand_new_bert*に必要なすべてのコードが追加されました。この時点で、起こり得るコードスタイルの誤りを修正するために以下を実行してください:

```bash
make style
```

また、コーディングスタイルが品質チェックをパスすることを確認してください:

```bash
make quality
```

🤗 Transformersの非常に厳格なデザインテストには、まだ合格していないテストが他にも存在するかもしれません。これは、ドキュメント文字列に情報が不足しているか、名前が間違っていることが原因であることが多いです。ここで詰まった場合は、Hugging Faceチームが必ず助けてくれるでしょう。

最後に、コードが正しく機能することを確認した後にリファクタリングを行うのは、常に良いアイデアです。すべてのテストがパスした今が、追加したコードを再度確認してリファクタリングを行う良いタイミングです。

これでコーディングの部分は完了しました、おめでとうございます! 🎉 あなたは素晴らしいです! 😎

**12. モデルをモデルハブにアップロード**

最後のパートでは、すべてのチェックポイントをモデルハブ向けに変換してアップロードし、各チェックポイントにモデルカードを追加します。モデルハブの機能については、[Model sharing and uploading Page](model_sharing)を読んで理解できます。ここでは、*brand_new_bert*の著者組織の下にモデルをアップロードできるように、必要なアクセス権を取得するためにHugging Faceチームと協力する必要があります。`transformers`のすべてのモデルに存在する`push_to_hub`メソッドは、チェックポイントをハブにプッシュする迅速かつ効率的な方法です。以下に、小さなコードスニペットを示します:

```python
brand_new_bert.push_to_hub("brand_new_bert")
# Uncomment the following line to push to an organization.
# brand_new_bert.push_to_hub("<organization>/brand_new_bert")
```

各チェックポイントに適切なモデルカードを作成する価値があります。モデルカードは、この特定のチェックポイントの特性をハイライトするべきです。例えば、このチェックポイントはどのデータセットで事前学習/ファインチューニングされたか、どのような下流タスクでモデルを使用すべきかを示すべきです。また、モデルの正しい使用方法に関するコードも含めるべきです。

**13.(オプション)ノートブックの追加**

*brand_new_bert*を推論または下流タスクのファインチューニングにどのように使用できるかを詳細に示すノートブックを追加することは非常に役立ちます。これはPRをマージするために必須ではありませんが、コミュニティにとって非常に有用です。

**14. 完成したPRの提出**

プログラミングが完了したら、最後のステップとしてPRをメインブランチにマージしましょう。通常、Hugging Faceチームはこの時点で既にあなたをサポートしているはずですが、PRに分かりやすい説明を加え、レビュアーに特定の設計上の選択を伝えたい場合はコードにコメントを追加することも価値があります。

### Share your work!!

さあ、コミュニティからあなたの作業に対する評価を得る時が来ました!モデルの追加を完了することは、TransformersおよびNLPコミュニティにとって重要な貢献です。あなたのコードとポートされた事前学習済みモデルは、何百人、何千人という開発者や研究者によって確実に使用されるでしょう。あなたの仕事に誇りを持ち、コミュニティとあなたの成果を共有しましょう。

**あなたはコミュニティの誰でも簡単にアクセスできるもう一つのモデルを作成しました! 🤯**
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Audio Spectrogram Transformer

## 概要

Audio Spectrogram Transformerモデルは、[AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778)という論文でYuan Gong、Yu-An Chung、James Glassによって提案されました。これは、音声を画像(スペクトログラム)に変換することで、音声に[Vision Transformer](vit)を適用します。このモデルは音声分類において最先端の結果を達成しています。

論文の要旨は以下の通りです:

*過去10年間で、畳み込みニューラルネットワーク(CNN)は、音声スペクトログラムから対応するラベルへの直接的なマッピングを学習することを目指す、エンドツーエンドの音声分類モデルの主要な構成要素として広く採用されてきました。長距離のグローバルなコンテキストをより良く捉えるため、最近の傾向として、CNNの上にセルフアテンション機構を追加し、CNN-アテンションハイブリッドモデルを形成することがあります。しかし、CNNへの依存が必要かどうか、そして純粋にアテンションに基づくニューラルネットワークだけで音声分類において良いパフォーマンスを得ることができるかどうかは明らかではありません。本論文では、これらの問いに答えるため、音声分類用としては最初の、畳み込みを使わない純粋にアテンションベースのモデルであるAudio Spectrogram Transformer(AST)を紹介します。我々はASTを様々なオーディオ分類ベンチマークで評価し、AudioSetで0.485 mAP、ESC-50で95.6%の正解率、Speech Commands V2で98.1%の正解率という新たな最先端の結果を達成しました。*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/audio_spectogram_transformer_architecture.png" alt="drawing" width="600"/>

<small> Audio Spectrogram Transformerのアーキテクチャ。<a href="https://arxiv.org/abs/2104.01778">元論文</a>より抜粋。</small>

このモデルは[nielsr](https://huggingface.co/nielsr)より提供されました。オリジナルのコードは[こちら](https://github.com/YuanGongND/ast)で見ることができます。

## 使用上のヒント

- 独自のデータセットでAudio Spectrogram Transformer(AST)をファインチューニングする場合、入力を正規化する(入力の平均を0、標準偏差を0.5にする)ことが推奨されます。[`ASTFeatureExtractor`]はこれを処理します。デフォルトではAudioSetの平均と標準偏差を使用していることに注意してください。著者が下流のデータセットの統計をどのように計算しているかは、[`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py)で確認することができます。
- ASTは低い学習率を必要とし(著者は[PSLA論文](https://arxiv.org/abs/2102.01243)で提案されたCNNモデルに比べて10倍小さい学習率を使用しています)、素早く収束するため、タスクに適した学習率と学習率スケジューラーを探すことをお勧めします。

## 参考資料

Audio Spectrogram Transformerの使用を開始するのに役立つ公式のHugging Faceおよびコミュニティ(🌎で示されている)の参考資料の一覧です。

<PipelineTag pipeline="audio-classification"/>

- ASTを用いた音声分類の推論を説明するノートブックは[こちら](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/AST)で見ることができます。
- [`ASTForAudioClassification`]は、この[例示スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification)と[ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)によってサポートされています。
- こちらも参照:[音声分類タスク](../tasks/audio_classification)。

ここに参考資料を提出したい場合は、気兼ねなくPull Requestを開いてください。私たちはそれをレビューいたします!参考資料は、既存のものを複製するのではなく、何か新しいことを示すことが理想的です。

## ASTConfig

[[autodoc]] ASTConfig

## ASTFeatureExtractor

[[autodoc]] ASTFeatureExtractor
    - __call__

## ASTModel

[[autodoc]] ASTModel
    - forward

## ASTForAudioClassification

[[autodoc]] ASTForAudioClassification
    - forward
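補足として、以下は音声分類の推論の最小スケッチです(チェックポイント名 `MIT/ast-finetuned-audioset-10-10-0.4593` と1秒のダミー波形は説明用の仮定であり、実際には手元の音声とチェックポイントに置き換えてください):

```python
import numpy as np
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

ckpt = "MIT/ast-finetuned-audioset-10-10-0.4593"  # 説明用に仮定したチェックポイント名
feature_extractor = ASTFeatureExtractor.from_pretrained(ckpt)
model = ASTForAudioClassification.from_pretrained(ckpt)

# 16kHz・1秒のダミー波形(実際には librosa などで読み込んだ音声を使う)
waveform = np.zeros(16000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])
```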
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Blenderbot Small

[`BlenderbotSmallModel`] と [`BlenderbotSmallForConditionalGeneration`] は、チェックポイント [facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M) と組み合わせてのみ使用してください。より大規模な Blenderbot チェックポイントには、代わりに [`BlenderbotModel`] と [`BlenderbotForConditionalGeneration`] を使用してください。

## Overview

Blender チャットボットモデルは、Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinghan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston によって、2020年4月30日の論文 [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) で提案されました。

論文の要旨は次のとおりです。

*オープンドメインのチャットボットの構築は、機械学習研究にとって難しい分野です。これまでの研究では、ニューラルモデルをパラメーター数とトレーニングデータのサイズでスケーリングすると結果が向上することが示されていますが、本研究では、高性能のチャットボットには他の要素も重要であることを示します。良い会話には、会話の専門家がシームレスに融合させる多くのスキルが必要です。すなわち、魅力的な話のポイントを提供しながら相手の話を聞くこと、そして一貫したペルソナを維持しながら知識、共感、個性を適切に表現することです。適切なトレーニングデータと生成戦略を選択すれば、大規模モデルがこれらのスキルを学習できることを示します。90M、2.7B、9.4B パラメーターのモデルを使用してこれらのレシピのバリアントを構築し、モデルとコードを公開しています。人間による評価では、マルチターン対話における魅力と人間らしさの測定という観点から、当社の最良のモデルが既存のアプローチよりも優れていることが示されています。次に、モデルの失敗事例の分析を通じて、この研究の限界について説明します。*

Tips:

- Blenderbot Small は絶対位置埋め込みを備えたモデルなので、通常は入力を左側ではなく右側にパディングすることをお勧めします。

このモデルは、[patrickvonplaten](https://huggingface.co/patrickvonplaten) によって提供されました。著者のコードは [こちら](https://github.com/facebookresearch/ParlAI) をご覧ください。

## Documentation resources

- [因果言語モデリング タスク ガイド](../tasks/language_modeling)
- [翻訳タスクガイド](../tasks/translation)
- [要約タスクガイド](../tasks/summarization)

## BlenderbotSmallConfig

[[autodoc]] BlenderbotSmallConfig

## BlenderbotSmallTokenizer

[[autodoc]] BlenderbotSmallTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## BlenderbotSmallTokenizerFast

[[autodoc]] BlenderbotSmallTokenizerFast

## BlenderbotSmallModel

[[autodoc]] BlenderbotSmallModel
    - forward

## BlenderbotSmallForConditionalGeneration

[[autodoc]] BlenderbotSmallForConditionalGeneration
    - forward

## BlenderbotSmallForCausalLM

[[autodoc]] BlenderbotSmallForCausalLM
    - forward

## TFBlenderbotSmallModel

[[autodoc]] TFBlenderbotSmallModel
    - call

## TFBlenderbotSmallForConditionalGeneration

[[autodoc]] TFBlenderbotSmallForConditionalGeneration
    - call

## FlaxBlenderbotSmallModel

[[autodoc]] FlaxBlenderbotSmallModel
    - __call__
    - encode
    - decode

## FlaxBlenderbotSmallForConditionalGeneration

[[autodoc]] FlaxBlenderbotSmallForConditionalGeneration
    - __call__
    - encode
    - decode
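補足として、以下は応答生成の最小スケッチです(チェックポイント名は本ページの記載に合わせており、入力文は説明用の仮定です):

```python
import torch
from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot-90M")

# 絶対位置埋め込みのため、バッチ処理では右側パディングが推奨される
inputs = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="pt")
with torch.no_grad():
    reply_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
```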
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# CodeLlama

## Overview

Code Llama モデルは、Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve によって [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) で提案されました。

論文の要約は次のとおりです。

*私たちは Code Llama をリリースします。これは Llama 2 に基づくコードのための大規模言語モデルファミリであり、オープンモデルの中で最先端のパフォーマンス、穴埋め(infilling)機能、大規模な入力コンテキストのサポート、プログラミングタスクのゼロショット命令追従機能を提供します。幅広いアプリケーションをカバーするために複数のフレーバーを提供しています。基盤モデル(Code Llama)、Python 特化(Code Llama - Python)、および命令追従モデル(Code Llama - Instruct)で、それぞれ 7B、13B、34B パラメーターを備えています。すべてのモデルは 16,000 トークンのシーケンスでトレーニングされており、最大 100,000 トークンの入力で改善が見られます。7B および 13B の Code Llama と Code Llama - Instruct バリアントは、周囲のコンテンツに基づく穴埋めをサポートします。Code Llama は、いくつかのコードベンチマークにおいてオープンモデルの中で最先端のパフォーマンスに達し、HumanEval と MBPP でそれぞれ最大 53% と 55% のスコアを獲得しました。特に、Code Llama - Python 7B は HumanEval および MBPP 上で Llama 2 70B よりも優れたパフォーマンスを示し、すべてのモデルは MultiPL-E 上で公開されている他のすべてのモデルよりも優れています。私たちは、研究と商業利用の両方を許可する寛容なライセンスに基づいて Code Llama をリリースしています。*

すべての Code Llama モデルチェックポイントは [こちら](https://huggingface.co/models?search=code_llama) で確認でき、正式にリリースされたチェックポイントは [meta llama org](https://huggingface.co/meta-llama) で確認できます。

このモデルは [ArthurZucker](https://huggingface.co/ArthurZ) によって提供されました。著者のオリジナルのコードは [こちら](https://github.com/facebookresearch/llama) にあります。

## Usage tips and examples

<Tip warning={true}>

Code Llama のベースとなる `Llama2` ファミリーモデルは `bfloat16` を使用してトレーニングされましたが、元の推論では `float16` を使用します。さまざまな精度を見てみましょう。

* `float32`: モデルの初期化に関する PyTorch の規約では、モデルの重みがどの `dtype` で格納されていたかに関係なく、モデルを `float32` でロードします。`transformers` も、PyTorch との一貫性を保つためにこの規約に従っており、これがデフォルトで選択されます。`AutoModel` API でチェックポイントを保存時の重みの型でロードしたい場合は、`torch_dtype="auto"` を指定する必要があります。`model = AutoModelForCausalLM.from_pretrained("path", torch_dtype="auto")`。
* `bfloat16`: Code Llama はこの精度でトレーニングされているため、さらなるトレーニングや微調整にはこの精度を使用することをお勧めします。
* `float16`: この精度で推論を実行することをお勧めします。通常は `bfloat16` より高速であり、評価メトリクスにも `bfloat16` と比べて明らかな低下が見られないためです。`bfloat16` で推論を実行することもできます。微調整後は、`float16` と `bfloat16` の両方で推論結果を確認することをお勧めします。

上で述べたように、モデルを初期化するときに `torch_dtype="auto"` を使用しない限り、ストレージの重みの `dtype` はほとんど無関係です。その理由は、モデルが最初にダウンロードされ(オンラインのチェックポイントの `dtype` を使用)、次に `torch` のデフォルトの `dtype` にキャストされるためです(`torch.float32` になります)。`torch_dtype` が指定されている場合は、代わりにそれが使用されます。

</Tip>

Tips:

- 穴埋め(infilling)タスクはそのままサポートされています。入力を埋めたい場所には `tokenizer.fill_token` を使用してください。
- モデル変換スクリプトは、`Llama2` ファミリの場合と同じです。

使用例は次のとおりです。

```bash
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
    --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
```

スクリプトを実行するには、(最大のバージョンであっても)float16 精度でモデル全体をホストするのに十分な CPU RAM が必要であることに注意してください(いくつかのチェックポイントがあり、それぞれにモデルの重みの一部が含まれているため、すべてを RAM にロードする必要があります)。

変換後、モデルとトークナイザーは次の方法でロードできます。

```python
>>> from transformers import LlamaForCausalLM, CodeLlamaTokenizer

>>> tokenizer = CodeLlamaTokenizer.from_pretrained("meta-llama/CodeLlama-7b-hf")
>>> model = LlamaForCausalLM.from_pretrained("meta-llama/CodeLlama-7b-hf")
>>> PROMPT = '''def remove_non_ascii(s: str) -> str:
    """ <FILL_ME>
    return result
'''
>>> input_ids = tokenizer(PROMPT, return_tensors="pt")["input_ids"]
>>> generated_ids = model.generate(input_ids, max_new_tokens=128)

>>> filling = tokenizer.batch_decode(generated_ids[:, input_ids.shape[1]:], skip_special_tokens = True)[0]
>>> print(PROMPT.replace("<FILL_ME>", filling))
def remove_non_ascii(s: str) -> str:
    """ Remove non-ASCII characters from a string.

    Args:
        s: The string to remove non-ASCII characters from.

    Returns:
        The string with non-ASCII characters removed.
    """
    result = ""
    for c in s:
        if ord(c) < 128:
            result += c
    return result
```

塗りつぶされた部分だけが必要な場合:

```python
>>> from transformers import pipeline
>>> import torch

>>> generator = pipeline("text-generation",model="meta-llama/CodeLlama-7b-hf",torch_dtype=torch.float16, device_map="auto")
>>> generator('def remove_non_ascii(s: str) -> str:\n    """ <FILL_ME>\n    return result', max_new_tokens = 128)
[{'generated_text': 'def remove_non_ascii(s: str) -> str:\n    """ <FILL_ME>\n    return resultRemove non-ASCII characters from a string. """\n    result = ""\n    for c in s:\n        if ord(c) < 128:\n            result += c'}]
```

内部では、トークナイザーが [`<FILL_ME>` で自動的に分割](https://huggingface.co/docs/transformers/main/model_doc/code_llama#transformers.CodeLlamaTokenizer.fill_token)し、[オリジナルのトレーニングパターン](https://github.com/facebookresearch/codellama/blob/cb51c14ec761370ba2e2bc351374a79265d0465e/llama/generation.py#L402)に従った書式設定済みの入力文字列を作成します。これはパターンを自分で準備するよりも堅牢で、トークンの接着など、デバッグが非常に難しい落とし穴を回避できます。このモデルまたは他のモデルに必要な CPU および GPU メモリの量を確認するには、その値を決定するのに役立つ[この計算ツール](https://huggingface.co/spaces/hf-accelerate/model-memory-usage)を試してください。

LLaMA トークナイザーは、[sentencepiece](https://github.com/google/sentencepiece) に基づく BPE モデルです。sentencepiece の癖の 1 つは、シーケンスをデコードするときに、最初のトークンが単語の先頭(例:「Banana」)である場合、トークナイザーは文字列の先頭にプレフィックススペースを追加しないことです。

<Tip>

Code Llama は、`Llama2` モデルと同じアーキテクチャを持っています。API リファレンスについては、[Llama2 のドキュメントページ](llama2) を参照してください。以下に Code Llama トークナイザーのリファレンスを示します。

</Tip>

## CodeLlamaTokenizer

[[autodoc]] CodeLlamaTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## CodeLlamaTokenizerFast

[[autodoc]] CodeLlamaTokenizerFast
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - update_post_processor
    - save_vocabulary
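補足として、上記のプレフィックススペースの挙動を確認する最小スケッチです(チェックポイントへのアクセス権が必要な場合があり、コメント中の出力例はあくまで想定です):

```python
from transformers import CodeLlamaTokenizer

tokenizer = CodeLlamaTokenizer.from_pretrained("meta-llama/CodeLlama-7b-hf")

# 先頭トークンが単語の先頭の場合、デコード結果の先頭にスペースは付かない
ids = tokenizer("Banana", add_special_tokens=False).input_ids
print(repr(tokenizer.decode(ids)))  # 想定例: 'Banana'
```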
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# DePlot

## Overview

DePlot は、Fangyu Liu, Julian Martin Eisenschlos, Francesco Piccinno, Syrine Krichene, Chenxi Pang, Kenton Lee, Mandar Joshi, Wenhu Chen, Nigel Collier, Yasemin Altun の論文 [DePlot: One-shot visual language reasoning by plot-to-table translation](https://arxiv.org/abs/2212.10505) で提案されました。

論文の要約には次のように記載されています。

*チャートやプロットなどの視覚言語は人間の世界に遍在しています。プロットやチャートを理解するには、強力な推論スキルが必要です。従来の最先端(SOTA)モデルには少なくとも数万のトレーニングサンプルが必要であり、その推論能力は、特に人間が作成した複雑なクエリでは依然として大幅に制限されています。この論文では、視覚言語推論に対する最初のワンショットソリューションを紹介します。私たちは、視覚言語推論の課題を 2 つのステップに分解します。(1) プロットからテキストへの翻訳と、(2) 翻訳されたテキストに対する推論です。この方法の鍵となるのは、プロットまたはチャートの画像を線形化されたテーブルに変換する、DePlot という名前のモダリティ変換モジュールです。その後、DePlot の出力を直接使用して、事前トレーニング済みの大規模言語モデル(LLM)をプロンプトし、LLM の少数ショット推論機能を利用できます。DePlot を取得するには、統一されたタスク形式とメトリクスを確立することでプロットからテーブルへのタスクを標準化し、このタスクで DePlot をエンドツーエンドでトレーニングします。DePlot は、プラグアンドプレイ方式で LLM とともに既製で使用できます。28,000 を超えるデータポイントで微調整された SOTA モデルと比較して、ワンショットプロンプトのみを使用する DePlot+LLM は、チャート QA タスクからの人が作成したクエリに関して、微調整された SOTA より 24.0% の改善を達成しました。*

DePlot は、`Pix2Struct` アーキテクチャを使用してトレーニングされたモデルです。`Pix2Struct` の詳細については、[Pix2Struct ドキュメント](https://huggingface.co/docs/transformers/main/en/model_doc/pix2struct) を参照してください。DePlot は、`Pix2Struct` アーキテクチャの Visual Question Answering サブセットです。入力された質問を画像上にレンダリングし、答えを予測します。

## Usage example

現在、DePlot で使用できるチェックポイントは 1 つです。

- `google/deplot`: ChartQA データセットで微調整された DePlot

```python
from transformers import AutoProcessor, Pix2StructForConditionalGeneration
import requests
from PIL import Image

model = Pix2StructForConditionalGeneration.from_pretrained("google/deplot")
processor = AutoProcessor.from_pretrained("google/deplot")
url = "https://raw.githubusercontent.com/vis-nlp/ChartQA/main/ChartQA%20Dataset/val/png/5090.png"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, text="Generate underlying data table of the figure below:", return_tensors="pt")
predictions = model.generate(**inputs, max_new_tokens=512)
print(processor.decode(predictions[0], skip_special_tokens=True))
```

## Fine-tuning

DePlot を微調整するには、pix2struct の[微調整ノートブック](https://github.com/huggingface/notebooks/blob/main/examples/image_captioning_pix2struct.ipynb)を参照してください。`Pix2Struct` モデルの場合、Adafactor とコサイン学習率スケジューラを使用してモデルを微調整すると、収束が高速化されることがわかりました。

```python
from transformers.optimization import Adafactor, get_cosine_schedule_with_warmup

optimizer = Adafactor(self.parameters(), scale_parameter=False, relative_step=False, lr=0.01, weight_decay=1e-05)
scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=1000, num_training_steps=40000)
```

<Tip>

DePlot は、`Pix2Struct` アーキテクチャを使用してトレーニングされたモデルです。API リファレンスについては、[`Pix2Struct` ドキュメント](pix2struct) を参照してください。

</Tip>
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Train with a script 🤗 Transformersの[notebooks](./notebooks/README)と一緒に、[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch)、[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow)、または[JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax)を使用してモデルをトレーニングする方法を示すサンプルスクリプトもあります。 また、私たちの[研究プロジェクト](https://github.com/huggingface/transformers/tree/main/examples/research_projects)や[レガシーの例](https://github.com/huggingface/transformers/tree/main/examples/legacy)で使用したスクリプトも見つかります。これらのスクリプトは現在メンテナンスされておらず、おそらく最新バージョンのライブラリと互換性がない特定の🤗 Transformersのバージョンが必要です。 サンプルスクリプトはすべての問題でそのまま動作することは期待されておらず、解決しようとしている問題にスクリプトを適応させる必要があるかもしれません。この点をサポートするために、ほとんどのスクリプトはデータがどのように前処理されているかを完全に公開し、必要に応じて編集できるようにしています。 サンプルスクリプトで実装したい機能がある場合は、[フォーラム](https://discuss.huggingface.co/)か[イシュートラッカー](https://github.com/huggingface/transformers/issues)で議論してからプルリクエストを提出してください。バグ修正は歓迎しますが、読みやすさのコストで機能を追加するプルリクエストはほとんどマージされない可能性が高いです。 このガイドでは、[PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization)と[TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization)で実行するサマリゼーショントレーニングスクリプトの実行方法を示します。すべての例は、明示的に指定されていない限り、両方のフレームワークともに動作することが期待されています。 ## Setup 最新バージョンのサンプルスクリプトを正常に実行するには、新しい仮想環境に🤗 Transformersをソースからインストールする必要があります: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install . 
``` 以前のスクリプトのバージョンについては、以下のトグルをクリックしてください: <details> <summary>以前の🤗 Transformersのバージョンに関する例</summary> <ul> <li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li> </ul> </details> 次に、現在の🤗 Transformersのクローンを特定のバージョンに切り替えてください。たとえば、v3.5.1などです。 ```bash git checkout tags/v3.5.1 ``` 適切なライブラリバージョンを設定したら、任意の例のフォルダに移動し、例固有の要件をインストールします: ```bash pip install -r requirements.txt ``` ## Run a script <frameworkcontent> <pt> この例のスクリプトは、🤗 [Datasets](https://huggingface.co/docs/datasets/) ライブラリからデータセットをダウンロードし、前処理を行います。次に、[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) を使用して要約をサポートするアーキテクチャ上でデータセットをファインチューニングします。以下の例では、[CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) データセット上で [T5-small](https://huggingface.co/google-t5/t5-small) をファインチューニングする方法が示されています。T5 モデルは、そのトレーニング方法に起因して追加の `source_prefix` 引数が必要です。このプロンプトにより、T5 はこれが要約タスクであることを知ることができます。 ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail 
\
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```
</pt>
<tf>
この例のスクリプトは、🤗 [Datasets](https://huggingface.co/docs/datasets/) ライブラリからデータセットをダウンロードして前処理します。その後、スクリプトは要約をサポートするアーキテクチャ上で Keras を使用してデータセットをファインチューニングします。以下の例では、[T5-small](https://huggingface.co/google-t5/t5-small) を [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail) データセットでファインチューニングする方法を示しています。T5 モデルは、そのトレーニング方法に起因して追加の `source_prefix` 引数が必要です。このプロンプトは、T5 にこれが要約タスクであることを知らせます。

```bash
python examples/tensorflow/summarization/run_summarization.py  \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --output_dir /tmp/tst-summarization  \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --num_train_epochs 3 \
    --do_train \
    --do_eval
```
</tf>
</frameworkcontent>

## Distributed training and mixed precision

[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer)は、分散トレーニングと混合精度をサポートしているため、これらの機能をスクリプトでも使用することができます。これらの機能を有効にするには、次の手順を実行します。

- `fp16`引数を追加して混合精度を有効にします。
- `nproc_per_node`引数で使用するGPUの数を設定します。

```bash
torchrun \
    --nproc_per_node 8 pytorch/summarization/run_summarization.py \
    --fp16 \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```

TensorFlowスクリプトは、分散トレーニングに[`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy)を使用するため、トレーニングスクリプトに追加の引数を渡す必要はありません。TensorFlowスクリプトは、複数のGPUが利用可能な場合、デフォルトでそれらを使用します。

## Run a script on a TPU

<frameworkcontent>
<pt>
Tensor Processing Units (TPUs)は、パフォーマンスを加速させるために特別に設計されています。PyTorchは、[XLA](https://www.tensorflow.org/xla)ディープラーニングコンパイラを使用してTPUsをサポートしており、詳細については[こちら](https://github.com/pytorch/xla/blob/master/README.md)をご覧ください。TPUを使用するには、`xla_spawn.py`スクリプトを起動し、`num_cores`引数を使用して使用するTPUコアの数を設定します。

```bash
python xla_spawn.py --num_cores 8 \
    summarization/run_summarization.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```
</pt>
<tf>
Tensor Processing Units(TPUs)は、性能を高速化するために特別に設計されています。TensorFlowスクリプトは、TPUsでトレーニングするために[`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy)を利用します。TPUを使用するには、TPUリソースの名前を`tpu`引数に渡します。

```bash
python run_summarization.py  \
    --tpu name_of_tpu_resource \
    --model_name_or_path google-t5/t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --num_train_epochs 3 \
    --do_train \
    --do_eval
```
</tf>
</frameworkcontent>

## Run a script with 🤗 Accelerate

🤗 [Accelerate](https://huggingface.co/docs/accelerate)は、PyTorch専用のライブラリで、CPUのみ、複数のGPU、TPUなど、さまざまなセットアップでモデルをトレーニングするための統一された方法を提供します。PyTorchのトレーニングループを完全に可視化しながら実行できます。まだインストールしていない場合は、🤗 Accelerateをインストールしてください:

> 注意:Accelerateは急速に開発が進行しているため、スクリプトを実行するにはaccelerateのgitバージョンをインストールする必要があります

```bash
pip install
git+https://github.com/huggingface/accelerate ``` 代わりに、`run_summarization_no_trainer.py` スクリプトを使用する必要があります。 🤗 Accelerate がサポートするスクリプトには、フォルダ内に `task_no_trainer.py` ファイルが含まれています。まず、次のコマンドを実行して設定ファイルを作成し、保存します: ```bash accelerate config ``` テストを行い、設定が正しく構成されているか確認してください: ```bash accelerate test ``` Now you are ready to launch the training: ```bash accelerate launch run_summarization_no_trainer.py \ --model_name_or_path google-t5/t5-small \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir ~/tmp/tst-summarization ``` ## Use a custom dataset 要約スクリプトは、CSVまたはJSON Lineファイルであれば、カスタムデータセットをサポートしています。独自のデータセットを使用する場合、いくつかの追加の引数を指定する必要があります。 - `train_file`および`validation_file`は、トレーニングとバリデーションのファイルへのパスを指定します。 - `text_column`は要約するための入力テキストです。 - `summary_column`は出力する対象テキストです。 カスタムデータセットを使用した要約スクリプトは、以下のようになります: ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --train_file path_to_csv_or_jsonlines_file \ --validation_file path_to_csv_or_jsonlines_file \ --text_column text_column_name \ --summary_column summary_column_name \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --overwrite_output_dir \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --predict_with_generate ``` ## Test a script すべてが予想通りに動作することを確認するために、データセット全体を処理する前に、データセットの一部の例でスクリプトを実行することは良いアイデアです。以下の引数を使用して、データセットを最大サンプル数に切り詰めます: - `max_train_samples` - `max_eval_samples` - `max_predict_samples` ```bash python examples/pytorch/summarization/run_summarization.py \ --model_name_or_path google-t5/t5-small \ --max_train_samples 50 \ --max_eval_samples 50 \ --max_predict_samples 50 \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ``` 一部の例のスクリプトは、`max_predict_samples`引数をサポートしていないことがあります。この引数がサポートされているかどうかがわからない場合は、`-h`引数を追加して確認してください。 ```bash examples/pytorch/summarization/run_summarization.py -h ``` ## Resume training from checkpoint 以前のチェックポイントからトレーニングを再開するための役立つオプションもあります。これにより、トレーニングが中断された場合でも、最初からやり直すことなく、中断したところから再開できます。チェックポイントからトレーニングを再開するための2つの方法があります。 最初の方法は、`output_dir previous_output_dir` 引数を使用して、`output_dir` に保存された最新のチェックポイントからトレーニングを再開する方法です。この場合、`overwrite_output_dir` を削除する必要があります: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --output_dir previous_output_dir \ --predict_with_generate ``` 2番目の方法では、`resume_from_checkpoint path_to_specific_checkpoint` 引数を使用して、特定のチェックポイントフォルダからトレーニングを再開します。 ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --resume_from_checkpoint path_to_specific_checkpoint \ --predict_with_generate ``` ## Share your model すべてのスクリプトは、最終的なモデルを [Model Hub](https://huggingface.co/models) にアップロードできます。開始する前に Hugging Face にログインしていることを確認してください。 ```bash 
huggingface-cli login ``` 次に、スクリプトに `push_to_hub` 引数を追加します。この引数は、Hugging Face のユーザー名と `output_dir` で指定したフォルダ名でリポジトリを作成します。 特定の名前をリポジトリに付けるには、`push_to_hub_model_id` 引数を使用して追加します。このリポジトリは自動的にあなたの名前空間の下にリストされます。 以下の例は、特定のリポジトリ名でモデルをアップロードする方法を示しています: ```bash python examples/pytorch/summarization/run_summarization.py --model_name_or_path google-t5/t5-small \ --do_train \ --do_eval \ --dataset_name cnn_dailymail \ --dataset_config "3.0.0" \ --source_prefix "summarize: " \ --push_to_hub \ --push_to_hub_model_id finetuned-t5-cnn_dailymail \ --output_dir /tmp/tst-summarization \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=4 \ --overwrite_output_dir \ --predict_with_generate ```
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLM prompting guide [[open-in-colab]] Falcon、LLaMA などの大規模言語モデルは、事前にトレーニングされたトランスフォーマー モデルであり、最初は予測するようにトレーニングされています。 入力テキストが与えられた場合の次のトークン。通常、数十億のパラメータがあり、何兆ものパラメータでトレーニングされています。 長期間のトークン。その結果、これらのモデルは非常に強力で多用途になり、次のようなことが可能になります。 自然言語プロンプトでモデルに指示することで、すぐに複数の NLP タスクを解決できます。 最適な出力を保証するためにこのようなプロンプトを設計することは、多くの場合「プロンプト エンジニアリング」と呼ばれます。プロンプトエンジニアリングとは、 かなりの量の実験を必要とする反復プロセス。自然言語ははるかに柔軟で表現力豊かです ただし、プログラミング言語よりもあいまいさが生じる可能性があります。同時に、自然言語によるプロンプト 変化にはかなり敏感です。プロンプトにわずかな変更を加えただけでも、出力が大幅に異なる場合があります。 すべてのケースに適合するプロンプトを作成するための正確なレシピはありませんが、研究者はいくつかの最良のレシピを考案しました。 最適な結果をより一貫して達成するのに役立つ実践。 このガイドでは、より優れた LLM プロンプトを作成し、さまざまな NLP タスクを解決するのに役立つプロンプト エンジニアリングのベスト プラクティスについて説明します。 次のことを学びます: - [プロンプトの基本](#basics-of-prompting) - [LLM プロンプトのベスト プラクティス](#best-practices-of-llm-prompting) - [高度なプロンプト テクニック: 数回のプロンプトと思考の連鎖](#advanced-prompting-techniques) - [プロンプトを表示する代わりに微調整する場合](#prompting-vs-fine-tuning) <Tip> 迅速なエンジニアリングは、LLM 出力最適化プロセスの一部にすぎません。もう 1 つの重要な要素は、 最適なテキスト生成戦略。 LLM が生成時に後続の各トークンを選択する方法をカスタマイズできます。 トレーニング可能なパラメータを一切変更せずにテキストを作成します。テキスト生成パラメータを微調整することで、 生成されたテキストに繰り返しが含まれているため、より一貫性があり人間らしい響きになります。 テキスト生成戦略とパラメーターはこのガイドの範囲外ですが、これらのトピックについて詳しくは、次のトピックを参照してください。 次のガイド: * [LLM による生成](../llm_tutorial) * [テキスト生成戦略](../generation_strategies) </Tip> ## Basics of prompting ### Types of models 最新の LLM の大部分は、デコーダ専用のトランスフォーマーです。例としては、[LLaMA](../model_doc/llama)、 [Llama2](../model_doc/llama2)、[Falcon](../model_doc/falcon)、[GPT2](../model_doc/gpt2)。ただし、遭遇する可能性があります エンコーダ デコーダ トランスフォーマ LLM も同様です。たとえば、[Flan-T5](../model_doc/flan-t5) や [BART](../model_doc/bart) です。 エンコーダ デコーダ スタイルのモデルは通常、出力が入力に**大きく**依存する生成タスクで使用されます。 たとえば、翻訳と要約です。デコーダ専用モデルは、他のすべてのタイプの生成タスクに使用されます。 パイプラインを使用して LLM でテキストを生成する場合、使用している LLM のタイプを知ることが重要です。 異なるパイプラインを使用します。 `text-generation`パイプラインを使用してデコーダのみのモデルで推論を実行します。 ```python >>> from transformers import pipeline >>> import torch >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> generator = pipeline('text-generation', model = 'openai-community/gpt2') >>> prompt = "Hello, I'm a language model" >>> generator(prompt, max_length = 30) [{'generated_text': "Hello, I'm a language model expert, so I'm a big believer in the concept that I know very well and then I try to look into"}] ``` エンコーダー/デコーダーを使用して推論を実行するには、`text2text-generation` パイプラインを使用します。 ```python >>> text2text_generator = pipeline("text2text-generation", model = 'google/flan-t5-base') >>> prompt = "Translate from English to French: I'm very happy to see you" >>> text2text_generator(prompt) [{'generated_text': 'Je suis très heureuse de vous rencontrer.'}] ``` ### Base vs instruct/chat models 🤗 Hub で利用できる最近の LLM チェックポイントのほとんどには、base と instruct (または chat) の 2 つのバージョンがあります。例えば、 [`tiiuae/falcon-7b`](https://huggingface.co/tiiuae/falcon-7b) および 
[`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b) -指示する)。 基本モデルは、最初のプロンプトが与えられたときにテキストを完成させるのには優れていますが、NLP タスクには理想的ではありません。 指示に従う必要がある場合、または会話で使用する場合に使用します。ここで、指示 (チャット) バージョンが登場します。 これらのチェックポイントは、命令と会話データに基づいて事前トレーニングされたベース バージョンをさらに微調整した結果です。 この追加の微調整により、多くの NLP タスクにとってより適切な選択肢になります。 [`tiiuae/falcon-7b-instruct`](https://huggingface.co/tiiuae/falcon-7b-instruct) で使用できるいくつかの簡単なプロンプトを示してみましょう。 いくつかの一般的な NLP タスクを解決します。 ### NLP tasks まず、環境をセットアップしましょう。 ```bash pip install -q transformers accelerate ``` 次に、適切なパイプライン (`text_generation`) を使用してモデルをロードしましょう。 ```python >>> from transformers import pipeline, AutoTokenizer >>> import torch >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> model = "tiiuae/falcon-7b-instruct" >>> tokenizer = AutoTokenizer.from_pretrained(model) >>> pipe = pipeline( ... "text-generation", ... model=model, ... tokenizer=tokenizer, ... torch_dtype=torch.bfloat16, ... device_map="auto", ... ) ``` <Tip> Falcon モデルは `bfloat16` データ型を使用してトレーニングされたため、同じものを使用することをお勧めします。これには、最近の CUDA のバージョンに準拠しており、最新のカードで最適に動作します。 </Tip> パイプライン経由でモデルをロードしたので、プロンプトを使用して NLP タスクを解決する方法を見てみましょう。 #### Text classification テキスト分類の最も一般的な形式の 1 つはセンチメント分析であり、「ポジティブ」、「ネガティブ」、「ネガティブ」などのラベルを割り当てます。 または、一連のテキストに対して「中立」です。与えられたテキスト (映画レビュー) を分類するようにモデルに指示するプロンプトを作成してみましょう。 まず指示を与え、次に分類するテキストを指定します。そのままにしておくのではなく、 応答の先頭にも追加します - `"Sentiment: "`: ```python >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> prompt = """Classify the text into neutral, negative or positive. ... Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. ... Sentiment: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Classify the text into neutral, negative or positive. Text: This movie is definitely one of my favorite movies of its kind. The interaction between respectable and morally strong characters is an ode to chivalry and the honor code amongst thieves and policemen. Sentiment: Positive ``` その結果、出力には、手順で提供したリストの分類ラベルが含まれており、それは正しいラベルです。 <Tip> プロンプトに加えて、`max_new_tokens`パラメータを渡していることに気づくかもしれません。トークンの数を制御します。 モデルが生成します。これは、学習できる多くのテキスト生成パラメーターの 1 つです。 [テキスト生成戦略](../generation_strategies) ガイドを参照してください。 </Tip> #### Named Entity Recognition 固有表現認識 (NER) は、テキスト内の人物、場所、組織などの固有表現を検索するタスクです。 プロンプトの指示を変更して、LLM にこのタスクを実行させましょう。ここでは`return_full_text = False`も設定しましょう 出力にプロンプ​​トが含​​まれないようにします。 ```python >>> torch.manual_seed(1) # doctest: +IGNORE_RESULT >>> prompt = """Return a list of named entities in the text. ... Text: The Golden State Warriors are an American professional basketball team based in San Francisco. ... Named entities: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=15, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") - Golden State Warriors - San Francisco ``` ご覧のとおり、モデルは指定されたテキストから 2 つの名前付きエンティティを正しく識別しました。 #### Translation LLM が実行できるもう 1 つのタスクは翻訳です。このタスクにはエンコーダー/デコーダー モデルを使用することを選択できますが、ここでは 例を簡単にするために、きちんとした仕事をする Falcon-7b-instruct を使い続けます。もう一度、方法は次のとおりです テキストの一部を英語からイタリア語に翻訳するようにモデルに指示する基本的なプロンプトを作成できます。 ```python >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT >>> prompt = """Translate the English text to Italian. ... Text: Sometimes, I've believed as many as six impossible things before breakfast. ... Translation: ... """ >>> sequences = pipe( ... prompt, ... 
max_new_tokens=20, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") A volte, ho creduto a sei impossibili cose prima di colazione. ``` ここでは、出力生成時にモデルがもう少し柔軟になるように `do_sample=True` と `top_k=10` を追加しました。 #### Text summarization 翻訳と同様に、テキストの要約も、出力が入力に**大きく**依存する生成タスクです。 エンコーダ/デコーダ モデルの方が良い選択になる可能性があります。ただし、デコーダ スタイルのモデルもこのタスクに使用できます。 以前は、プロンプトの先頭に指示を配置していました。ただし、プロンプトの最後で、 指示を与えるのに適した場所でもあります。通常、命令はどちらかの端に配置することをお勧めします。 ```python >>> torch.manual_seed(3) # doctest: +IGNORE_RESULT >>> prompt = """Permaculture is a design process mimicking the diversity, functionality and resilience of natural ecosystems. The principles and practices are drawn from traditional ecological knowledge of indigenous cultures combined with modern scientific understanding and technological innovations. Permaculture design provides a framework helping individuals and communities develop innovative, creative and effective strategies for meeting basic needs while preparing for and mitigating the projected impacts of climate change. ... Write a summary of the above text. ... Summary: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=30, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"{seq['generated_text']}") Permaculture is an ecological design mimicking natural ecosystems to meet basic needs and prepare for climate change. It is based on traditional knowledge and scientific understanding. ``` #### Question answering 質問応答タスクの場合、プロンプトを次の論理コンポーネントに構造化できます: 指示、コンテキスト、質問、 先頭の単語またはフレーズ (`"Answer:"`) を使用して、モデルを操作して答えの生成を開始します。 ```python >>> torch.manual_seed(4) # doctest: +IGNORE_RESULT >>> prompt = """Answer the question using the context below. ... Context: Gazpacho is a cold soup and drink made of raw, blended vegetables. Most gazpacho includes stale bread, tomato, cucumbers, onion, bell peppers, garlic, olive oil, wine vinegar, water, and salt. Northern recipes often include cumin and/or pimentón (smoked sweet paprika). Traditionally, gazpacho was made by pounding the vegetables in a mortar with a pestle; this more laborious method is still sometimes used as it helps keep the gazpacho cool and avoids the foam and silky consistency of smoothie versions made in blenders or food processors. ... Question: What modern tool is used to make gazpacho? ... Answer: ... """ >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Modern tools are used, such as immersion blenders ``` #### Reasoning LLM にとって推論は最も困難なタスクの 1 つであり、良い結果を達成するには、多くの場合、次のような高度なプロンプト テクニックを適用する必要があります。 [Chain-of-thought](#chain-of-thought)。 基本的なプロンプトを使用して、単純な算術タスクに関するモデル推論を作成できるかどうか試してみましょう。 ```python >>> torch.manual_seed(5) # doctest: +IGNORE_RESULT >>> prompt = """There are 5 groups of students in the class. Each group has 4 students. How many students are there in the class?""" >>> sequences = pipe( ... prompt, ... max_new_tokens=30, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: There are a total of 5 groups, so there are 5 x 4=20 students in the class. ``` 正しい!もう少し複雑さを増やして、基本的なプロンプトで問題を解決できるかどうかを確認してみましょう。 ```python >>> torch.manual_seed(6) # doctest: +IGNORE_RESULT >>> prompt = """I baked 15 muffins. I ate 2 muffins and gave 5 muffins to a neighbor. 
My partner then bought 6 more muffins and ate 2. How many muffins do we now have?""" >>> sequences = pipe( ... prompt, ... max_new_tokens=10, ... do_sample=True, ... top_k=10, ... return_full_text = False, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: The total number of muffins now is 21 ``` これは間違った答えです。12 である必要があります。この場合、プロンプトが基本的すぎるか、選択内容が原因である可能性があります。 結局のところ、Falcon の最小バージョンを選択しました。あらゆるサイズのモデルでは推論が困難ですが、より大きなモデルでは モデルのパフォーマンスが向上する可能性があります。 ## Best practices of LLM prompting ガイドのこのセクションでは、プロンプトの結果を改善する傾向にあるベスト プラクティスのリストをまとめました。 * 使用するモデルを選択する場合は、最新かつ最も機能的なモデルの方がパフォーマンスが向上する可能性があります。 * シンプルで短いプロンプトから始めて、そこから繰り返します。 * 指示はプロンプトの最初または最後に入力してください。大規模なコンテキストを扱う場合、モデルはさまざまな最適化を適用して、アテンションの複雑さが二次的に拡大するのを防ぎます。これにより、モデルはプロンプトの途中よりも最初または最後に注意を払うようになります。 * 指示と、それが適用されるテキストを明確に区別してください。これについては、次のセクションで詳しく説明します。 * タスクと望ましい結果 (その形式、長さ、スタイル、言語など) について具体的かつ説明的にします。 * 曖昧な説明や指示は避けてください。 *「何をしてはいけないか」という指示ではなく、「何をすべきか」という指示を優先します。 * 最初の単語を書いて (またはモデルの最初の文を始めて)、出力を正しい方向に「導き」ます。 * [Few-shot prompting](#few-shot-prompting) や [Chain-of-thought](#chain-of-thought) などの高度なテクニックを使用します。 * さまざまなモデルでプロンプトをテストして、その堅牢性を評価します。 * プロンプトのバージョンを確認し、パフォーマンスを追跡します。 ## Advanced prompting techniques ### Few-shot prompting 上記のセクションの基本的なプロンプトは、「ゼロショット」プロンプトの例です。つまり、モデルにはすでに与えられています。 指示とコンテキストはありますが、解決策を含む例はありません。通常、命令データセットに基づいて微調整された LLM このような「ゼロショット」タスクでも優れたパフォーマンスを発揮します。ただし、タスクがより複雑であったり微妙な点があったりする場合があります。 出力には、命令だけではモデルが理解できないいくつかの要件があります。この場合、次のことができます。 少数ショット プロンプトと呼ばれるテクニックを試してください。 少数ショット プロンプトでは、モデルにパフォーマンスを向上させるためのより多くのコンテキストを提供するプロンプト内の例が提供されます。 例では、例のパターンに従って出力を生成するようにモデルを条件付けします。 以下に例を示します。 ```python >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT >>> prompt = """Text: The first human went into space and orbited the Earth on April 12, 1961. ... Date: 04/12/1961 ... Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon. ... Date:""" >>> sequences = pipe( ... prompt, ... max_new_tokens=8, ... do_sample=True, ... top_k=10, ... ) >>> for seq in sequences: ... print(f"Result: {seq['generated_text']}") Result: Text: The first human went into space and orbited the Earth on April 12, 1961. Date: 04/12/1961 Text: The first-ever televised presidential debate in the United States took place on September 28, 1960, between presidential candidates John F. Kennedy and Richard Nixon. Date: 09/28/1960 ``` 上記のコード スニペットでは、モデルへの目的の出力を示すために 1 つの例を使用しました。したがって、これは、 「ワンショット」プロンプト。ただし、タスクの複雑さに応じて、複数の例を使用する必要がある場合があります。 数回のプロンプト手法の制限: - LLM は例のパターンを理解できますが、これらの手法は複雑な推論タスクではうまく機能しません。 - 少数ショットのプロンプトでは、長いプロンプトを作成する必要があります。大量のトークンを含むプロンプトでは、計算量と待ち時間が増加する可能性があります。プロンプトの長さにも制限があります。 - 多くの例を与えると、モデルが学習するつもりのなかったパターンを学習することがあります。 3番目の映画レビューはいつも否定的だということ。 ### Chain-of-thought 思考連鎖 (CoT) プロンプトは、モデルを微調整して中間推論ステップを生成し、改善する手法です。 複雑な推論タスクの結果。 モデルを操作して推論ステップを生成するには、2 つの方法があります。 - 質問に対する詳細な回答を含む例を示し、問題に対処する方法をモデルに示すことで、数回のプロンプトを表示します。 - 「ステップごとに考えてみましょう」または「深呼吸して、問題をステップごとに解決してください」などのフレーズを追加してモデルに推論を指示します。 [推論セクション](#reasoning) のマフィンの例に CoT テクニックを適用し、より大きなモデルを使用すると、 [HuggingChat](https://huggingface.co/chat/)で遊べる(`tiiuae/falcon-180B-chat`)など、 推論結果は大幅に改善されます。 ```text Let's go through this step-by-step: 1. You start with 15 muffins. 2. You eat 2 muffins, leaving you with 13 muffins. 3. You give 5 muffins to your neighbor, leaving you with 8 muffins. 4. Your partner buys 6 more muffins, bringing the total number of muffins to 14. 5. Your partner eats 2 muffins, leaving you with 12 muffins. 
Therefore, you now have 12 muffins. ``` ## Prompting vs fine-tuning プロンプトを最適化することで優れた結果を達成できますが、モデルを微調整するかどうかについてはまだ思案するかもしれません。 あなたの場合にはもっとうまくいくでしょう。より小規模なモデルを微調整することが好ましいオプションである場合のいくつかのシナリオを次に示します。 - ドメインが LLM が事前にトレーニングされたものと大きく異なっており、広範なプロンプト最適化では十分な結果が得られませんでした。 - モデルが低リソース言語で適切に動作する必要があります。 - 厳格な規制の下にある機密データでモデルをトレーニングする必要があります。 - コスト、プライバシー、インフラストラクチャ、またはその他の制限により、小規模なモデルを使用する必要があります。 上記のすべての例で、十分な大きさのファイルをすでに持っているか、簡単に入手できるかを確認する必要があります。 ドメイン固有のデータセットを合理的なコストでモデルを微調整できます。十分な時間とリソースも必要になります モデルを微調整します。 上記の例が当てはまらない場合は、プロンプトを最適化する方が有益であることがわかります。
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Summary of the tokenizers [[open-in-colab]] このページでは、トークナイゼーションについて詳しく見ていきます。 <Youtube id="VFp38yj8h3A"/> [前処理のチュートリアル](preprocessing)で見たように、テキストをトークン化することは、それを単語またはサブワードに分割し、それらをルックアップテーブルを介してIDに変換することです。単語またはサブワードをIDに変換することは簡単ですので、この要約ではテキストを単語またはサブワードに分割する(つまり、テキストをトークナイズする)ことに焦点を当てます。具体的には、🤗 Transformersで使用される3つの主要なトークナイザ、[Byte-Pair Encoding(BPE)](#byte-pair-encoding)、[WordPiece](#wordpiece)、および[SentencePiece](#sentencepiece)を見て、どのモデルがどのトークナイザタイプを使用しているかの例を示します。 各モデルページでは、事前トレーニング済みモデルがどのトークナイザタイプを使用しているかを知るために、関連するトークナイザのドキュメントを確認できます。例えば、[`BertTokenizer`]を見ると、モデルが[WordPiece](#wordpiece)を使用していることがわかります。 ## Introduction テキストをより小さなチャンクに分割することは、見かけ以上に難しいタスクであり、複数の方法があります。例えば、次の文を考えてみましょう。「"Don't you love 🤗 Transformers? We sure do."」 <Youtube id="nhJxYji1aho"/> このテキストをトークン化する簡単な方法は、スペースで分割することです。これにより、以下のようになります: ``` ["Don't", "you", "love", "🤗", "Transformers?", "We", "sure", "do."] ``` これは合理的な第一歩ですが、トークン "Transformers?" と "do." を見ると、句読点が単語 "Transformer" と "do" に結合されていることがわかり、これは最適ではありません。句読点を考慮に入れるべきで、モデルが単語とそれに続く可能性のあるすべての句読点記号の異なる表現を学ばなければならないことを避けるべきです。これにより、モデルが学ばなければならない表現の数が爆発的に増加します。句読点を考慮に入れた場合、例文のトークン化は次のようになります: ``` ["Don", "'", "t", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] ``` ただし、単語「"Don't"」をトークン化する方法に関しては、不利な側面があります。 「"Don't"」は「"do not"」を表しているため、「["Do", "n't"]」としてトークン化する方が適しています。ここから事柄が複雑になり、各モデルが独自のトークナイザータイプを持つ理由の一部でもあります。テキストをトークン化するために適用するルールに応じて、同じテキストに対して異なるトークナイズされた出力が生成されます。事前トレーニング済みモデルは、トレーニングデータをトークナイズするのに使用されたルールと同じルールでトークナイズされた入力を提供する場合にのみ正常に機能します。 [spaCy](https://spacy.io/)と[Moses](http://www.statmt.org/moses/?n=Development.GetStarted)は、2つの人気のあるルールベースのトークナイザーです。これらを私たちの例に適用すると、*spaCy*と*Moses*は次のような出力を生成します: ``` ["Do", "n't", "you", "love", "🤗", "Transformers", "?", "We", "sure", "do", "."] ``` 空白と句読点のトークン化、およびルールベースのトークン化が使用されていることがわかります。空白と句読点のトークン化、およびルールベースのトークン化は、文を単語に分割することをゆるやかに定義される単語トークン化の例です。テキストをより小さなチャンクに分割するための最も直感的な方法である一方、このトークン化方法は大規模なテキストコーパスに対して問題を引き起こすことがあります。この場合、空白と句読点のトークン化は通常、非常に大きな語彙(すべての一意な単語とトークンのセット)を生成します。例えば、[Transformer XL](model_doc/transformerxl)は空白と句読点のトークン化を使用しており、語彙サイズは267,735です! このような大きな語彙サイズは、モデルに非常に大きな埋め込み行列を入力および出力レイヤーとして持たせることを強制し、メモリおよび時間の複雑さの増加を引き起こします。一般的に、トランスフォーマーモデルは、特に単一の言語で事前トレーニングされた場合、50,000を超える語彙サイズを持つことはほとんどありません。 したがって、シンプルな空白と句読点のトークン化が不十分な場合、なぜ単に文字単位でトークン化しないのかという疑問が生じますか? 
<Youtube id="ssLq_EK2jLE"/> 文字単位のトークン化は非常にシンプルであり、メモリと時間の複雑さを大幅に削減できますが、モデルに意味のある入力表現を学習させることが非常に難しくなります。たとえば、文字「"t"」のための意味のあるコンテキスト独立の表現を学習することは、単語「"today"」のためのコンテキスト独立の表現を学習するよりもはるかに難しいです。そのため、文字単位のトークン化はしばしばパフォーマンスの低下を伴います。したがって、トランスフォーマーモデルは単語レベルと文字レベルのトークン化のハイブリッドである**サブワード**トークン化を使用して、両方の世界の利点を活かします。 ## Subword tokenization <Youtube id="zHvTiHr506c"/> サブワードトークン化アルゴリズムは、頻繁に使用される単語をより小さなサブワードに分割すべきではないが、珍しい単語は意味のあるサブワードに分解されるという原則に依存しています。たとえば、「"annoyingly"」は珍しい単語と見なされ、その単語は「"annoying"」と「"ly"」に分解されるかもしれません。独立した「"annoying"」と「"ly"」はより頻繁に現れますが、「"annoyingly"」の意味は「"annoying"」と「"ly"」の合成的な意味によって保持されます。これは特にトルコ語などの結合言語で役立ちます。ここではサブワードを連結して(ほぼ)任意の長い複雑な単語を形成できます。 サブワードトークン化により、モデルは合理的な語彙サイズを持つことができ、意味のあるコンテキスト独立の表現を学習できます。さらに、サブワードトークン化により、モデルは以前に見たことのない単語を処理し、それらを既知のサブワードに分解することができます。例えば、[`~transformers.BertTokenizer`]は`"I have a new GPU!"`を以下のようにトークン化します: ```py >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> tokenizer.tokenize("I have a new GPU!") ["i", "have", "a", "new", "gp", "##u", "!"] ``` 「uncased」モデルを考慮しているため、まず文を小文字に変換しました。トークナイザの語彙に「["i", "have", "a", "new"]」という単語が存在することがわかりますが、「"gpu"」という単語は存在しません。したがって、トークナイザは「"gpu"」を既知のサブワード「["gp"、"##u"]」に分割します。ここで「"##"」は、トークンのデコードまたはトークナイゼーションの逆転のために、トークンの前の部分にスペースなしで接続する必要があることを意味します。 別の例として、[`~transformers.XLNetTokenizer`]は以下のように以前のサンプルテキストをトークン化します: ```py >>> from transformers import XLNetTokenizer >>> tokenizer = XLNetTokenizer.from_pretrained("xlnet/xlnet-base-cased") >>> tokenizer.tokenize("Don't you love 🤗 Transformers? We sure do.") ["▁Don", "'", "t", "▁you", "▁love", "▁", "🤗", "▁", "Transform", "ers", "?", "▁We", "▁sure", "▁do", "."] ``` これらの「▁」の意味については、[SentencePiece](#sentencepiece)を見るときに詳しく説明します。ご覧の通り、「Transformers」という珍しい単語は、より頻繁に現れるサブワード「Transform」と「ers」に分割されています。 さて、異なるサブワードトークン化アルゴリズムがどのように動作するかを見てみましょう。これらのトークナイゼーションアルゴリズムはすべて、通常は対応するモデルがトレーニングされるコーパスで行われる形式のトレーニングに依存しています。 <a id='byte-pair-encoding'></a> ### Byte-Pair Encoding(BPE) Byte-Pair Encoding(BPE)は、[Neural Machine Translation of Rare Words with Subword Units(Sennrich et al., 2015)](https://arxiv.org/abs/1508.07909)で導入されました。BPEは、トレーニングデータを単語に分割するプリトークナイザに依存しています。プリトークナイゼーションは、空白のトークナイゼーションなど、非常に単純なものであることがあります。例えば、[GPT-2](model_doc/gpt2)、[RoBERTa](model_doc/roberta)です。より高度なプリトークナイゼーションには、ルールベースのトークナイゼーション([XLM](model_doc/xlm)、[FlauBERT](model_doc/flaubert)などが大部分の言語にMosesを使用)や、[GPT](model_doc/gpt)(Spacyとftfyを使用してトレーニングコーパス内の各単語の頻度を数える)などが含まれます。 プリトークナイゼーションの後、一意の単語セットが作成され、各単語がトレーニングデータで出現した頻度が決定されます。次に、BPEはベース語彙を作成し、ベース語彙の二つのシンボルから新しいシンボルを形成するためのマージルールを学習します。このプロセスは、語彙が所望の語彙サイズに達するまで続けられます。なお、所望の語彙サイズはトークナイザをトレーニングする前に定義するハイパーパラメータであることに注意してください。 例として、プリトークナイゼーションの後、次のセットの単語とその出現頻度が決定されたと仮定しましょう: ``` ("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5) ``` したがって、ベース語彙は「["b", "g", "h", "n", "p", "s", "u"]」です。すべての単語をベース語彙のシンボルに分割すると、次のようになります: ``` ("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5) ``` その後、BPEは可能なすべてのシンボルペアの頻度を数え、最も頻繁に発生するシンボルペアを選択します。上記の例では、`"h"`の後に`"u"`が15回(`"hug"`の10回、`"hugs"`の5回)出現します。しかし、最も頻繁なシンボルペアは、合計で20回(`"u"`の10回、`"g"`の5回、`"u"`の5回)出現する`"u"`の後に`"g"`が続くシンボルペアです。したがって、トークナイザが最初に学習するマージルールは、`"u"`の後に`"g"`が続くすべての`"u"`シンボルを一緒にグループ化することです。次に、`"ug"`が語彙に追加されます。単語のセットは次になります: ``` ("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5) ``` 次に、BPEは次に最も一般的なシンボルペアを識別します。それは「"u"」に続いて「"n"」で、16回出現します。したがって、「"u"」と「"n"」は「"un"」に結合され、語彙に追加されます。次に最も頻度の高いシンボルペアは、「"h"」に続いて「"ug"」で、15回出現します。再びペアが結合され、「hug」が語彙に追加できます。 
この段階では、語彙は`["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"]`であり、一意の単語のセットは以下のように表されます:

```
("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5)
```

仮に、Byte-Pair Encoding(BPE)のトレーニングがこの段階で停止したとすると、学習されたマージルールが新しい単語に適用されます(ただし、新しい単語にベースボキャブラリに含まれないシンボルが含まれていない場合に限ります)。例えば、単語 "bug" は `["b", "ug"]` としてトークン化されますが、"mug" はベースボキャブラリに "m" シンボルが含まれていないため、`["<unk>", "ug"]` としてトークン化されます。一般的に、"m" のような単一の文字が `"<unk>"` シンボルに置き換えられることはありません。トレーニングデータには通常、各文字の少なくとも1つの出現が含まれているためです。ただし、絵文字のような非常に特殊な文字の場合には発生する可能性があります。

前述のように、ボキャブラリサイズ、すなわちベースボキャブラリサイズ + マージの回数は選択するハイパーパラメータです。例えば、[GPT](model_doc/gpt) はベース文字が478文字で、40,000回のマージ後にトレーニングを停止したため、ボキャブラリサイズは40,478です。

#### Byte-level BPE

すべてのUnicode文字をベース文字と考えると、すべての可能なベース文字を含むベースボキャブラリはかなり大きくなることがあります。[GPT-2](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) は、バイトをベースボキャブラリとして使用するという賢いトリックにより、ベースボキャブラリのサイズを256に抑えつつ、すべてのベース文字がボキャブラリに含まれるようにしています。句読点を扱うためのいくつかの追加ルールを備えたGPT-2のトークナイザは、`"<unk>"` シンボルを必要とせずにすべてのテキストをトークン化できます。[GPT-2](model_doc/gpt2) は50,257のボキャブラリサイズを持っており、これは256バイトのベーストークン、特別なテキストの終了を示すトークン、および50,000回のマージで学習したシンボルに対応しています。

### WordPiece

WordPieceは、[BERT](model_doc/bert)、[DistilBERT](model_doc/distilbert)、および[Electra](model_doc/electra)で使用されるサブワードトークナイゼーションアルゴリズムです。このアルゴリズムは、[Japanese and Korean Voice Search (Schuster et al., 2012)](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) で概説されており、BPEに非常に似ています。WordPieceは最も頻繁なシンボルペアを選択するのではなく、ボキャブラリに追加した場合にトレーニングデータの尤度を最大化するシンボルペアを選択します。

これは具体的にはどういう意味でしょうか?前の例で言えば、トレーニングデータの尤度を最大化することは、そのペアの確率を1番目のシンボルの確率と2番目のシンボルの確率の積で割った値が、すべてのシンボルペアの中で最も大きくなるシンボルペアを見つけることに等しいです。たとえば、`"ug"` の確率を `"u"` と `"g"` の確率の積で割った値が他のどのシンボルペアよりも高い場合にのみ、`"u"` の後に `"g"` が続くペアが結合されます。直感的に言えば、WordPieceは2つのシンボルを結合することによって失われるものを評価し、それが結合に値するかどうかを確認する点でBPEとはわずかに異なります。

### Unigram

Unigramは、[Subword Regularization: Improving Neural Network Translation Models with Multiple Subword Candidates (Kudo, 2018)](https://arxiv.org/pdf/1804.10959.pdf) で導入されたサブワードトークナイゼーションアルゴリズムです。BPEやWordPieceとは異なり、Unigramはベースボキャブラリを多数のシンボルで初期化し、各シンボルを削減してより小さなボキャブラリを取得します。ベースボキャブラリは、事前にトークン化されたすべての単語と最も一般的な部分文字列に対応する可能性があります。Unigramは、transformersのモデルで直接は使用されていませんが、[SentencePiece](#sentencepiece)と組み合わせて使用されます。

各トレーニングステップで、Unigramアルゴリズムは現在のボキャブラリとユニグラム言語モデルを使用してトレーニングデータ上の損失(通常は対数尤度として定義)を定義します。その後、ボキャブラリ内の各シンボルについて、そのシンボルがボキャブラリから削除された場合に全体の損失がどれだけ増加するかを計算します。Unigramは、損失の増加が最も低いpパーセント(通常は10%または20%)のシンボルを削除します。つまり、トレーニングデータ全体の損失に最も影響を与えないシンボルを削除します。このプロセスは、ボキャブラリが望ましいサイズに達するまで繰り返されます。Unigramアルゴリズムは常にベース文字を保持するため、任意の単語をトークン化できます。

Unigramはマージルールに基づいていないため(BPEとWordPieceとは対照的に)、トレーニング後の新しいテキストのトークン化にはいくつかの方法があります。例として、トレーニングされたUnigramトークナイザが持つボキャブラリが次のような場合:

```
["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"],
```

`"hugs"`は、`["hug", "s"]`、`["h", "ug", "s"]`、または`["h", "u", "g", "s"]`のようにトークン化できます。では、どれを選択すべきでしょうか?
Unigramは、トレーニングコーパス内の各トークンの確率を保存し、トレーニング後に各可能なトークン化の確率を計算できるようにします。このアルゴリズムは実際には最も可能性の高いトークン化を選択しますが、確率に従って可能なトークン化をサンプリングするオプションも提供します。これらの確率は、トークナイザーがトレーニングに使用する損失によって定義されます。トレーニングデータが単語 \\(x_{1}, \dots, x_{N}\\) で構成され、単語 \\(x_{i}\\) のすべての可能なトークン化のセットが \\(S(x_{i})\\) と定義される場合、全体の損失は次のように定義されます。

$$\mathcal{L} = -\sum_{i=1}^{N} \log \left ( \sum_{x \in S(x_{i})} p(x) \right )$$

<a id='sentencepiece'></a>

### SentencePiece

これまでに説明したすべてのトークン化アルゴリズムには同じ問題があります。それは、入力テキストが単語を区切るためにスペースを使用していると仮定しているということです。しかし、すべての言語が単語を区切るためにスペースを使用しているわけではありません。この問題を一般的に解決するための1つの方法は、言語固有の前トークナイザーを使用することです(例:[XLM](model_doc/xlm)は特定の中国語、日本語、およびタイ語の前トークナイザーを使用しています)。より一般的にこの問題を解決するために、[SentencePiece:ニューラルテキスト処理のためのシンプルで言語非依存のサブワードトークナイザーおよびデトークナイザー(Kudo et al.、2018)](https://arxiv.org/pdf/1808.06226.pdf) は、入力を生の入力ストリームとして扱い、スペースも使用する文字の集合に含めます。その上でBPEまたはunigramアルゴリズムを使用して適切な語彙を構築します。

たとえば、[`XLNetTokenizer`]はSentencePieceを使用しており、そのために前述の例で`"▁"`文字が語彙に含まれていました。SentencePieceを使用したデコードは非常に簡単で、すべてのトークンを単純に連結し、`"▁"`はスペースに置換されます。

ライブラリ内でSentencePieceを使用するすべてのtransformersモデルは、それをunigramと組み合わせて使用します。SentencePieceを使用するモデルの例には、[ALBERT](model_doc/albert)、[XLNet](model_doc/xlnet)、[Marian](model_doc/marian)、および[T5](model_doc/t5)があります。
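付録として、上の[Byte-Pair Encoding(BPE)](#byte-pair-encoding)の節で手で追ったマージ手順は、次のような最小限のPythonスケッチで再現できます(🤗 Tokenizersなどの実際の実装ではなく、本文の例に合わせて仮定した説明用のコードです):

```py
from collections import Counter

# 本文の例:プリトークナイゼーション後の単語と出現頻度
word_freqs = {
    ("h", "u", "g"): 10,
    ("p", "u", "g"): 5,
    ("p", "u", "n"): 12,
    ("b", "u", "n"): 4,
    ("h", "u", "g", "s"): 5,
}

def count_pairs(word_freqs):
    # 隣接するシンボルペアの出現頻度を数えます
    pairs = Counter()
    for word, freq in word_freqs.items():
        for pair in zip(word, word[1:]):
            pairs[pair] += freq
    return pairs

def merge_pair(word_freqs, pair):
    # 最頻出ペアをひとつの新しいシンボルに結合します
    merged = {}
    for word, freq in word_freqs.items():
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == pair:
                new_word.append(word[i] + word[i + 1])
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        merged[tuple(new_word)] = freq
    return merged

for step in range(3):
    best = count_pairs(word_freqs).most_common(1)[0][0]
    word_freqs = merge_pair(word_freqs, best)
    print(step + 1, best)
# 1 ('u', 'g')、2 ('u', 'n')、3 ('h', 'ug') と、本文と同じマージルールが学習されます
```

なお、WordPieceの場合は、ペアの頻度そのものではなく「ペアの頻度を構成シンボルそれぞれの頻度の積で割ったスコア」が最大のペアを選ぶ点だけがここと異なります。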
transformers/docs/source/ja/tokenizer_summary.md/0
{ "file_path": "transformers/docs/source/ja/tokenizer_summary.md", "repo_id": "transformers", "token_count": 9819 }
42
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -->

# 🤗 Transformers에 기여하기 [[contribute-to-transformers]]

누구나 🤗 Transformers에 기여할 수 있으며, 우리는 모든 사람의 기여를 소중히 생각합니다. 코드 기여는 커뮤니티를 돕는 유일한 방법이 아닙니다. 질문에 답하거나 다른 사람을 도와 문서를 개선하는 것도 매우 가치가 있습니다.

🤗 Transformers를 널리 알리는 것도 큰 도움이 됩니다! 멋진 프로젝트들을 가능하게 한 🤗 Transformers 라이브러리에 대해 블로그 게시글에 언급하거나, 도움이 되었을 때마다 Twitter에 알리거나, 저장소에 ⭐️ 를 표시하여 감사 인사를 전해주세요.

어떤 방식으로 기여하든 [행동 규칙](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md)을 숙지하고 존중해주세요.

**이 안내서는 멋진 [scikit-learn 기여 안내서](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md)에서 큰 영감을 받았습니다.**

## 기여하는 방법 [[ways-to-contribute]]

여러 가지 방법으로 🤗 Transformers에 기여할 수 있습니다:

* 기존 코드의 미해결된 문제를 수정합니다.
* 버그 또는 새로 추가되길 원하는 기능과 관련된 이슈를 제출합니다.
* 새로운 모델을 구현합니다.
* 예제나 문서에 기여합니다.

어디서부터 시작할지 모르겠다면, [Good First Issue](https://github.com/huggingface/transformers/contribute) 목록을 확인해보세요. 이 목록은 초보자도 참여하기 쉬운 오픈 이슈 목록을 제공하며, 당신이 오픈소스에 처음으로 기여하는 데 큰 도움이 될 것입니다. 그저 작업하고 싶은 이슈에 댓글만 달아주면 됩니다.

조금 더 도전적인 작업을 원한다면, [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) 목록도 확인해보세요. 이미 당신이 잘 하고 있다고 생각되더라도, 한 번 시도해보세요! 우리도 여러분을 도울 것입니다. 🚀

> 커뮤니티에 이루어지는 모든 기여는 똑같이 소중합니다. 🥰

## 미해결된 문제 수정하기 [[fixing-outstanding-issues]]

기존 코드에서 발견한 문제점에 대한 해결책이 떠오른 경우, 언제든지 [기여를 시작](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#create-a-pull-request)하고 Pull Request를 생성해주세요!

## 버그 관련 이슈를 제기하거나 새로운 기능 요청하기 [[submitting-a-bugrelated-issue-or-feature-request]]

버그 관련 이슈를 제기하거나 새로운 기능을 요청할 때는 다음 가이드라인을 최대한 준수해주세요. 이렇게 하면 좋은 피드백과 함께 빠르게 답변해 드릴 수 있습니다.

### 버그를 발견하셨나요? [[did-you-find-a-bug]]

🤗 Transformers 라이브러리는 사용 중에 겪는 문제를 보고해주는 사용자들 덕분에 더욱 견고해지고 신뢰할 수 있게 되었습니다.

이슈를 보고하기 전에, 버그가 이미 **보고되지 않았는지** 확인해주세요(GitHub의 이슈 탭 아래의 검색 바를 사용하세요). 이슈는 라이브러리 자체에서 발생한 버그여야 하며, 여러분의 코드에서 비롯된 문제가 아니어야 합니다. 버그가 라이브러리의 문제로 발생하였는지 확실하지 않은 경우 먼저 [포럼](https://discuss.huggingface.co/)에서 질문해 주세요. 이렇게 하면 일반적인 질문보다 라이브러리와 관련된 문제를 더 빠르게 해결할 수 있습니다.

버그가 이미 보고되지 않았다는 것을 확인했다면, 다음 정보를 포함하여 이슈를 제출해 주세요. 그러면 우리가 빠르게 해결할 수 있습니다:

* 사용 중인 **운영체제 종류와 버전**, 그리고 **Python**, **PyTorch** 또는 **TensorFlow** 버전.
* 버그를 30초 이내로 재현할 수 있는 간단하고 독립적인 코드 스니펫.
* 예외가 발생한 경우 *전체* 트레이스백.
* 스크린샷과 같이 도움이 될 것으로 생각되는 추가 정보를 첨부해 주세요.

운영체제와 소프트웨어 버전을 자동으로 가져오려면 다음 명령을 실행하세요:

```bash
transformers-cli env
```

저장소의 루트 디렉터리에서도 같은 명령을 실행할 수 있습니다:

```bash
python src/transformers/commands/transformers_cli.py env
```

### 새로운 기능을 원하시나요? [[do-you-want-a-new-feature]]

🤗 Transformers에서 사용하고 싶은 새로운 기능이 있다면, 다음 내용을 포함하여 이슈를 제출해 주세요:

1. 이 기능이 필요한 *이유*는 무엇인가요? 라이브러리에 대한 문제나 불만과 관련이 있나요? 프로젝트에 필요한 기능인가요? 커뮤니티에 도움이 될 만한 기능인가요? 어떤 내용이든 여러분의 이야기를 듣고 싶습니다!
2. 요청하는 기능을 최대한 자세히 설명해 주세요. 더 많은 정보를 제공할수록 더 나은 도움을 드릴 수 있습니다.
3. 해당 기능의 사용법을 보여주는 *코드 스니펫*을 제공해 주세요.
4. 기능과 관련된 논문이 있는 경우 링크를 포함해 주세요.

이슈가 잘 작성되었다면 이슈가 생성된 순간, 이미 80% 정도의 작업이 완료된 것입니다.

이슈를 제기하는 데 도움이 될 만한 [템플릿](https://github.com/huggingface/transformers/tree/main/templates)도 준비되어 있습니다.

## 새로운 모델을 구현하고 싶으신가요? [[do-you-want-to-implement-a-new-model]]

새로운 모델은 계속해서 출시됩니다.
만약 여러분이 새로운 모델을 구현하고 싶다면 다음 정보를 제공해 주세요: * 모델에 대한 간단한 설명과 논문 링크. * 구현이 공개되어 있다면 구현 링크. * 모델 가중치가 사용 가능하다면 가중치 링크. 만약 모델을 직접 기여하고 싶으시다면, 알려주세요. 🤗 Transformers에 추가할 수 있도록 도와드리겠습니다! [🤗 Transformers에 새로운 모델을 추가하는 방법](https://huggingface.co/docs/transformers/add_new_model)에 대한 기술적인 안내서도 있습니다. ## 문서를 추가하고 싶으신가요? [[do-you-want-to-add-documentation]] 우리는 언제나 더 명확하고 정확한 문서를 제공하기 위하여 개선점을 찾고 있습니다. 오탈자나 부족한 내용, 분명하지 않거나 부정확한 내용 등을 알려주시면 개선하는 데 도움이 됩니다. 관심이 있으시다면 변경하거나 기여하실 수 있도록 도와드리겠습니다! 문서를 생성, 빌드 및 작성하는 방법에 대한 자세한 내용은 [README](https://github.com/huggingface/transformers/tree/main/docs) 문서를 확인해 주세요. ## 풀 리퀘스트(Pull Request) 생성하기 [[create-a-pull-request]] 코드를 작성하기 전에 기존의 Pull Request나 이슈를 검색하여 누군가 이미 동일한 작업을 하고 있는지 확인하는 것이 좋습니다. 확실하지 않다면 피드백을 받기 위해 이슈를 열어보는 것이 좋습니다. 🤗 Transformers에 기여하기 위해서는 기본적인 `git` 사용 능력이 필요합니다. `git`은 사용하기 쉬운 도구는 아니지만, 매우 훌륭한 매뉴얼을 제공합니다. 쉘(shell)에서 `git --help`을 입력하여 확인해보세요! 만약 책을 선호한다면, [Pro Git](https://git-scm.com/book/en/v2)은 매우 좋은 참고 자료가 될 것입니다. 🤗 Transformers에 기여하려면 **[Python 3.9](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** 이상의 버전이 필요합니다. 기여를 시작하려면 다음 단계를 따르세요: 1. 저장소 페이지에서 **[Fork](https://github.com/huggingface/transformers/fork)** 버튼을 클릭하여 저장소를 포크하세요. 이렇게 하면 코드의 복사본이 여러분의 GitHub 사용자 계정 아래에 생성됩니다. 2. 포크한 저장소를 로컬 디스크로 클론하고, 기본 저장소를 원격(remote)으로 추가하세요: ```bash git clone git@github.com:<your Github handle>/transformers.git cd transformers git remote add upstream https://github.com/huggingface/transformers.git ``` 3. 개발 변경 사항을 저장할 새 브랜치를 생성하세요: ```bash git checkout -b a-descriptive-name-for-my-changes ``` 🚨 절대 `main` 브랜치에서 작업하지 **마세요!** 4. 가상 환경에서 다음 명령을 실행하여 개발 환경을 설정하세요: ```bash pip install -e ".[dev]" ``` 만약 이미 가상 환경에 🤗 Transformers가 설치되어 있다면, `-e` 플래그를 사용하여 설치하기 전에 `pip uninstall transformers`로 제거해주세요. 여러분의 운영체제에 따라서, 그리고 🤗 Transformers의 선택적 의존성의 수가 증가하면서, 이 명령이 실패할 수도 있습니다. 그럴 경우 사용하려는 딥러닝 프레임워크(PyTorch, TensorFlow, 그리고/또는 Flax)를 설치한 후 아래 명령을 실행해주세요: ```bash pip install -e ".[quality]" ``` 대부분의 경우 이것으로 충분할 것입니다. 5. 브랜치에서 기능을 개발하세요. 코드를 작업하는 동안 테스트 스위트(test suite)가 통과하는지 확인하세요. 다음과 같이 변경 사항에 영향을 받는 테스트를 실행하세요: ```bash pytest tests/<TEST_TO_RUN>.py ``` 테스트에 대한 더 많은 정보는 [테스트](https://huggingface.co/docs/transformers/testing) 가이드를 확인하세요. 🤗 Transformers는 `black`과 `ruff`를 사용하여 소스 코드의 형식을 일관되게 유지합니다. 변경 사항을 적용한 후에는 다음 명령으로 자동으로 스타일 교정 및 코드 검증을 수행하세요: ```bash make fixup ``` 이것은 또한 작업 중인 PR에서 수정한 파일에서만 작동하도록 최적화되어 있습니다. 검사를 하나씩 실행하려는 경우, 다음 명령으로 스타일 교정을 적용할 수 있습니다: ```bash make style ``` 🤗 Transformers는 또한 `ruff`와 몇 가지 사용자 정의 스크립트를 사용하여 코딩 실수를 확인합니다. CI를 통해 품질 관리가 수행되지만, 다음 명령으로 동일한 검사를 실행할 수 있습니다: ```bash make quality ``` 마지막으로, 새 모델을 추가할 때 일부 파일을 업데이트하는 것을 잊지 않도록 하기 위한 많은 스크립트가 있습니다. 다음 명령으로 이러한 스크립트를 실행할 수 있습니다: ```bash make repo-consistency ``` 이러한 검사에 대해 자세히 알아보고 관련 문제를 해결하는 방법은 [Pull Request에 대한 검사](https://huggingface.co/docs/transformers/pr_checks) 가이드를 확인하세요. 만약 `docs/source` 디렉터리 아래의 문서를 수정하는 경우, 문서가 빌드될 수 있는지 확인하세요. 이 검사는 Pull Request를 열 때도 CI에서 실행됩니다. 로컬 검사를 실행하려면 문서 빌더를 설치해야 합니다: ```bash pip install ".[docs]" ``` 저장소의 루트 디렉터리에서 다음 명령을 실행하세요: ```bash doc-builder build transformers docs/source/en --build_dir ~/tmp/test-build ``` 이 명령은 `~/tmp/test-build` 폴더에 문서를 빌드하며, 생성된 Markdown 파일을 선호하는 편집기로 확인할 수 있습니다. Pull Request를 열 때 GitHub에서 문서를 미리 볼 수도 있습니다. 변경 사항에 만족하면 `git add`로 변경된 파일을 추가하고, `git commit`으로 변경 사항을 로컬에 기록하세요: ```bash git add modified_file.py git commit ``` [좋은 커밋 메시지](https://chris.beams.io/posts/git-commit/)를 작성하여 변경 사항을 명확하게 전달하세요! 변경 사항을 프로젝트 원본 저장소와 동기화하려면, PR을 *열기 전에* 브랜치를 `upstream/branch`로 리베이스(rebase)하세요. 
또는 관리자의 요청에 이 작업이 필요할 수 있습니다: ```bash git fetch upstream git rebase upstream/main ``` 변경 사항을 브랜치에 푸시하세요: ```bash git push -u origin a-descriptive-name-for-my-changes ``` 이미 PR을 열었다면, `--force` 플래그와 함께 강제 푸시해야 합니다. 아직 PR이 열리지 않았다면 정상적으로 변경 사항을 푸시하면 됩니다. 6. 이제 GitHub에서 포크한 저장소로 이동하고 **Pull request(풀 리퀘스트)**를 클릭하여 Pull Request를 열 수 있습니다. 아래의 [체크리스트](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#pull-request-checklist)에서 모든 항목에 체크 표시를 하세요. 준비가 완료되면 프로젝트 관리자에게 변경 사항을 보내 검토를 요청할 수 있습니다. 7. 관리자가 변경 사항을 요청해도 괜찮습니다. 핵심 기여자들도 동일한 상황을 겪습니다! 모두가 변경 사항을 Pull Request에서 볼 수 있도록, 로컬 브랜치에서 작업하고 변경 사항을 포크한 저장소로 푸시하세요. 그러면 변경 사항이 자동으로 Pull Request에 나타납니다. ### Pull Request 체크리스트 [[pull-request-checklist]] ☐ Pull Request 제목은 기여 내용을 요약해야 합니다.<br> ☐ Pull Request가 이슈를 해결하는 경우, Pull Request 설명에 이슈 번호를 언급하여 연관되어 있음을 알려주세요. (이슈를 확인하는 사람들이 해당 이슈에 대한 작업이 진행 중임을 알 수 있게 합니다).<br> ☐ 작업이 진행중이라면 제목 앞에 `[WIP]`를 붙여주세요. 중복 작업을 피하고 병합할 준비가 된 PR과 구분하기에 유용합니다.<br> ☐ 기존 테스트를 통과하는지 확인하세요.<br> ☐ 새로운 기능을 추가하는 경우, 해당 기능에 대한 테스트도 추가하세요.<br> - 새 모델을 추가하는 경우, `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)`을 사용하여 일반적인 테스트를 활성화하세요. - 새 `@slow` 테스트를 추가하는 경우, 다음 명령으로 테스트를 통과하는지 확인하세요: `RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py`. - 새 토크나이저를 추가하는 경우, 테스트를 작성하고 다음 명령으로 테스트를 통과하는지 확인하세요: `RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py`. - CircleCI에서는 느린 테스트를 실행하지 않지만, GitHub Actions에서는 매일 밤 실행됩니다!<br> ☐ 모든 공개 메소드는 유용한 기술문서를 가져야 합니다 (예를 들어 [`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py) 참조).<br> ☐ 저장소가 빠르게 성장하고 있으므로 저장소에 상당한 부담을 주는 이미지, 동영상 및 기타 텍스트가 아닌 파일은 추가하지 마세요. 대신 [`hf-internal-testing`](https://huggingface.co/hf-internal-testing)과 같은 Hub 저장소를 사용하여 이러한 파일을 호스팅하고 URL로 참조하세요. 문서와 관련된 이미지는 다음 저장소에 배치하는 것을 권장합니다: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). 이 데이터셋 저장소에서 PR을 열어서 Hugging Face 멤버에게 병합을 요청할 수 있습니다. Pull Request에서 실행되는 검사에 대한 자세한 정보는 [Pull Request에 대한 검사](https://huggingface.co/docs/transformers/pr_checks) 가이드를 확인하세요. ### 테스트 [[tests]] 라이브러리 동작과 여러 예제를 테스트할 수 있는 광범위한 테스트 스위트가 포함되어 있습니다. 라이브러리 테스트는 [tests](https://github.com/huggingface/transformers/tree/main/tests) 폴더에, 예제 테스트는 [examples](https://github.com/huggingface/transformers/tree/main/examples) 폴더에 있습니다. 속도가 빠른 `pytest`와 `pytest-xdist`를 선호합니다. 저장소의 루트 디렉터리에서 테스트를 실행할 *하위 폴더 경로 또는 테스트 파일 경로*를 지정하세요: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model ``` 마찬가지로 `examples` 디렉터리에서도 *하위 폴더 경로 또는 테스트 파일 경로*를 지정하세요. 예를 들어, 다음 명령은 PyTorch `examples` 디렉터리의 텍스트 분류 하위 폴더를 테스트합니다: ```bash pip install -r examples/xxx/requirements.txt # only needed the first time python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification ``` 이것이 실제로 `make test` 및 `make test-examples` 명령이 구현되는 방식입니다 (`pip install`은 제외합니다)! 또한 특정 기능만 테스트하기 위한 더 작은 테스트를 지정할 수 있습니다. 기본적으로 느린 테스트는 건너뛰지만 `RUN_SLOW` 환경 변수를 `yes`로 설정하여 실행할 수 있습니다. 이렇게 하면 많은 기가바이트 단위의 모델이 다운로드되므로 충분한 디스크 공간, 좋은 인터넷 연결과 많은 인내가 필요합니다! <Tip warning={true}> 테스트를 실행하려면 *하위 폴더 경로 또는 테스트 파일 경로*를 지정하세요. 그렇지 않으면 `tests` 또는 `examples` 폴더의 모든 테스트를 실행하게 되어 매우 긴 시간이 걸립니다! 
</Tip> ```bash RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification ``` 느린 테스트와 마찬가지로, 다음과 같이 테스트 중에 기본적으로 활성화되지 않는 다른 환경 변수도 있습니다: - `RUN_CUSTOM_TOKENIZERS`: 사용자 정의 토크나이저 테스트를 활성화합니다. - `RUN_PT_FLAX_CROSS_TESTS`: PyTorch + Flax 통합 테스트를 활성화합니다. - `RUN_PT_TF_CROSS_TESTS`: TensorFlow + PyTorch 통합 테스트를 활성화합니다. 더 많은 환경 변수와 추가 정보는 [testing_utils.py](src/transformers/testing_utils.py)에서 찾을 수 있습니다. 🤗 Transformers는 테스트 실행기로 `pytest`를 사용합니다. 그러나 테스트 스위트 자체에서는 `pytest` 관련 기능을 사용하지 않습니다. 이것은 `unittest`가 완전히 지원된다는 것을 의미합니다. 다음은 `unittest`로 테스트를 실행하는 방법입니다: ```bash python -m unittest discover -s tests -t . -v python -m unittest discover -s examples -t examples -v ``` ### 스타일 가이드 [[style-guide]] 문서는 [Google Python 스타일 가이드](https://google.github.io/styleguide/pyguide.html)를 따릅니다. 자세한 정보는 [문서 작성 가이드](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification)를 확인하세요. ### Windows에서 개발 [[develop-on-windows]] Windows에서 개발할 경우([Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/) 또는 WSL에서 작업하지 않는 한) Windows `CRLF` 줄 바꿈을 Linux `LF` 줄 바꿈으로 변환하도록 git을 구성해야 합니다: ```bash git config core.autocrlf input ``` Windows에서 `make` 명령을 실행하는 한 가지 방법은 MSYS2를 사용하는 것입니다: 1. [MSYS2](https://www.msys2.org/)를 다운로드합니다. `C:\msys64`에 설치되었다고 가정합니다. 2. CLI에서 `C:\msys64\msys2.exe`를 엽니다 (시작 메뉴에서 사용 가능해야 함). 3. 쉘에서 다음을 실행하여: `pacman -Syu` 및 `pacman -S make`로 `make`를 설치합니다. 4. 환경 변수 PATH에 `C:\msys64\usr\bin`을 추가하세요. 이제 모든 터미널 (PowerShell, cmd.exe 등)에서 `make`를 사용할 수 있습니다! 🎉 ### 포크한 저장소를 상위 원본 브랜치(main)과 동기화하기 (Hugging Face 저장소) [[sync-a-forked-repository-with-upstream-main-the-hugging-face-repository]] 포크한 저장소의 main 브랜치를 업데이트할 때, 다음 단계를 따라 수행해주세요. 이렇게 하면 각 upstream PR에 참조 노트가 추가되는 것을 피하고 이러한 PR에 관여하는 개발자들에게 불필요한 알림이 전송되는 것을 방지할 수 있습니다. 1. 가능하면 포크된 저장소의 브랜치 및 PR을 사용하여 upstream과 동기화하지 마세요. 대신 포크된 main 저장소에 직접 병합하세요. 2. PR이 반드시 필요한 경우, 브랜치를 확인한 후 다음 단계를 사용하세요: ```bash git checkout -b your-branch-for-syncing git pull --squash --no-commit upstream main git commit -m '<your message without GitHub references>' git push --set-upstream origin your-branch-for-syncing ```
transformers/docs/source/ko/contributing.md/0
{ "file_path": "transformers/docs/source/ko/contributing.md", "repo_id": "transformers", "token_count": 15765 }
43
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# BERTweet [[bertweet]]

## 개요 [[overview]]

BERTweet 모델은 Dat Quoc Nguyen, Thanh Vu, Anh Tuan Nguyen에 의해 [BERTweet: A pre-trained language model for English Tweets](https://www.aclweb.org/anthology/2020.emnlp-demos.2.pdf)에서 제안되었습니다.

해당 논문의 초록:

*영어 트윗을 위한 최초의 공개 대규모 사전 학습된 언어 모델인 BERTweet을 소개합니다. BERTweet은 BERT-base(Devlin et al., 2019)와 동일한 아키텍처를 가지고 있으며, RoBERTa 사전 학습 절차(Liu et al., 2019)를 사용하여 학습되었습니다. 실험 결과, BERTweet은 강력한 기준 모델인 RoBERTa-base 및 XLM-R-base(Conneau et al., 2020)의 성능을 능가하여 세 가지 트윗 NLP 작업(품사 태깅, 개체명 인식, 텍스트 분류)에서 이전 최신 모델보다 더 나은 성능을 보여주었습니다.*

이 모델은 [dqnguyen](https://huggingface.co/dqnguyen)께서 기여하셨습니다. 원본 코드는 [여기](https://github.com/VinAIResearch/BERTweet)에서 확인할 수 있습니다.

## 사용 예시 [[usage-example]]

```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer

>>> bertweet = AutoModel.from_pretrained("vinai/bertweet-base")

>>> # 트랜스포머 버전 4.x 이상:
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base", use_fast=False)

>>> # 트랜스포머 버전 3.x:
>>> # tokenizer = AutoTokenizer.from_pretrained("vinai/bertweet-base")

>>> # 입력된 트윗은 이미 정규화되었습니다!
>>> line = "SC has first two presumptive cases of coronavirus , DHEC confirms HTTPURL via @USER :cry:"

>>> input_ids = torch.tensor([tokenizer.encode(line)])

>>> with torch.no_grad():
...     features = bertweet(input_ids)  # Models outputs are now tuples

>>> # With TensorFlow 2.0+:
>>> # from transformers import TFAutoModel
>>> # bertweet = TFAutoModel.from_pretrained("vinai/bertweet-base")
```

<Tip>

이 구현은 토큰화 방법을 제외하고는 BERT와 동일합니다. API 참조 정보는 [BERT 문서](bert)를 참조하세요.

</Tip>

## Bertweet 토큰화(BertweetTokenizer) [[transformers.BertweetTokenizer]]

[[autodoc]] BertweetTokenizer
transformers/docs/source/ko/model_doc/bertweet.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/bertweet.md", "repo_id": "transformers", "token_count": 1434 }
44
<!--Copyright 2022 The HuggingFace Team and Microsoft. All rights reserved. Licensed under the MIT License; you may not use this file except in compliance with the License. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Graphormer[[graphormer]] <Tip warning={true}> 이 모델은 유지 보수 모드로만 운영되며, 코드를 변경하는 새로운 PR(Pull Request)은 받지 않습니다. 이 모델을 실행하는 데 문제가 발생한다면, 이 모델을 지원하는 마지막 버전인 v4.40.2를 다시 설치해 주세요. 다음 명령어를 실행하여 재설치할 수 있습니다: `pip install -U transformers==4.40.2`. </Tip> ## 개요[[overview]] Graphormer 모델은 Chengxuan Ying, Tianle Cai, Shengjie Luo, Shuxin Zheng, Guolin Ke, Di He, Yanming Shen, Tie-Yan Liu가 제안한 [트랜스포머가 그래프 표현에 있어서 정말 약할까?](https://arxiv.org/abs/2106.05234) 라는 논문에서 소개되었습니다. Graphormer는 그래프 트랜스포머 모델입니다. 텍스트 시퀀스 대신 그래프에서 계산을 수행할 수 있도록 수정되었으며, 전처리와 병합 과정에서 임베딩과 관심 특성을 생성한 후 수정된 어텐션을 사용합니다. 해당 논문의 초록입니다: *트랜스포머 아키텍처는 자연어 처리와 컴퓨터 비전 등 많은 분야에서 지배적인 선택을 받고 있는 아키텍처 입니다. 그러나 그래프 수준 예측 리더보드 상에서는 주류 GNN 변형모델들에 비해 경쟁력 있는 성능을 달성하지 못했습니다. 따라서 트랜스포머가 그래프 표현 학습에서 어떻게 잘 수행될 수 있을지는 여전히 미스터리였습니다. 본 논문에서는 Graphormer를 제시함으로써 이 미스터리를 해결합니다. Graphormer는 표준 트랜스포머 아키텍처를 기반으로 구축되었으며, 특히 최근의 OpenGraphBenchmark Large-Scale Challenge(OGB-LSC)의 광범위한 그래프 표현 학습 작업에서 탁월한 결과를 얻을 수 있었습니다. 그래프에서 트랜스포머를 활용하는데 핵심은 그래프의 구조적 정보를 모델에 효과적으로 인코딩하는 것입니다. 이를 위해 우리는 Graphormer가 그래프 구조 데이터를 더 잘 모델링할 수 있도록 돕는 몇 가지 간단하면서도 효과적인 구조적 인코딩 방법을 제안합니다. 또한, 우리는 Graphormer의 표현을 수학적으로 특성화하고, 그래프의 구조적 정보를 인코딩하는 우리의 방식으로 많은 인기 있는 GNN 변형모델들이 Graphormer의 특수한 경우로 포함될 수 있음을 보여줍니다.* 이 모델은 [clefourrier](https://huggingface.co/clefourrier)가 기여했습니다. 원본 코드는 [이곳](https://github.com/microsoft/Graphormer)에서 확인할 수 있습니다. ## 사용 팁[[usage-tips]] 이 모델은 큰 그래프(100개 이상의 노드개수/엣지개수)에서는 메모리 사용량이 폭발적으로 증가하므로 잘 작동하지 않습니다. 대안으로 배치 크기를 줄이거나, RAM을 늘리거나 또는 algos_graphormer.pyx 파일의 `UNREACHABLE_NODE_DISTANCE` 매개변수를 줄이는 방법도 있지만, 700개 이상의 노드개수/엣지개수를 처리하기에는 여전히 어려울 것입니다. 이 모델은 토크나이저를 사용하지 않고, 대신 훈련 중에 특별한 콜레이터(collator)를 사용합니다. ## GraphormerConfig[[transformers.GraphormerConfig]] [[autodoc]] GraphormerConfig ## GraphormerModel[[transformers.GraphormerModel]] [[autodoc]] GraphormerModel - forward ## GraphormerForGraphClassification[[transformers.GraphormerForGraphClassification]] [[autodoc]] GraphormerForGraphClassification - forward
transformers/docs/source/ko/model_doc/graphormer.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/graphormer.md", "repo_id": "transformers", "token_count": 2817 }
45
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Swin Transformer V2 [[swin-transformer-v2]] ## 개요 [[overview]] Swin Transformer V2는 Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo가 제안한 논문 [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883)에서 소개되었습니다. 논문의 초록은 다음과 같습니다: *대규모 NLP 모델들은 언어 작업에서의 성능을 크게 향상하며, 성능이 포화하는 징후를 보이지 않습니다. 또한, 사람과 유사한 few-shot 학습 능력을 보여줍니다. 이 논문은 대규모 모델을 컴퓨터 비전 분야에서 탐구하고자 합니다. 대형 비전 모델을 훈련하고 적용하는 데 있어 세 가지 주요 문제를 다룹니다: 훈련 불안정성, 사전 학습과 파인튜닝 간의 해상도 차이, 그리고 레이블이 달린 데이터에 대한 높은 요구입니다. 세 가지 주요 기법을 제안합니다: 1) 훈련 안정성을 개선하기 위한 residual-post-norm 방법과 cosine attention의 결합; 2) 저해상도 이미지로 사전 학습된 모델을 고해상도 입력으로 전이할 수 있는 log-spaced continuous position bias 방법; 3) 레이블이 달린 방대한 이미지의 필요성을 줄이기 위한 self-supervised 사전 학습 방법인 SimMIM입니다. 이러한 기법들을 통해 30억 개의 파라미터를 가진 Swin Transformer V2 모델을 성공적으로 훈련하였으며, 이는 현재까지 가장 크고 고밀도의 비전 모델로, 최대 1,536×1,536 해상도의 이미지를 다룰 수 있습니다. 이 모델은 ImageNet-V2 이미지 분류, COCO 객체 탐지, ADE20K 의미론적 분할, Kinetics-400 비디오 행동 분류 등 네 가지 대표적인 비전 작업에서 새로운 성능 기록을 세웠습니다. 또한, 우리의 훈련은 Google의 billion-level 비전 모델과 비교해 40배 적은 레이블이 달린 데이터와 40배 적은 훈련 시간으로 이루어졌다는 점에서 훨씬 더 효율적입니다.* 이 모델은 [nandwalritik](https://huggingface.co/nandwalritik)이 기여하였습니다. 원본 코드는 [여기](https://github.com/microsoft/Swin-Transformer)에서 확인할 수 있습니다. ## 리소스 [[resources]] Swin Transformer v2의 사용을 도울 수 있는 Hugging Face 및 커뮤니티(🌎로 표시)의 공식 자료 목록입니다. <PipelineTag pipeline="image-classification"/> - [`Swinv2ForImageClassification`]은 이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)을 통해 지원됩니다. - 관련 자료: [이미지 분류 작업 가이드](../tasks/image_classification) 또한: - [`Swinv2ForMaskedImageModeling`]는 이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining)를 통해 지원됩니다. 새로운 자료를 추가하고 싶으시다면, 언제든지 Pull Request를 열어주세요! 저희가 검토해 드릴게요. 이때, 추가하는 자료는 기존 자료와 중복되지 않고 새로운 내용을 보여주는 자료여야 합니다. ## Swinv2Config [[transformers.Swinv2Config]] [[autodoc]] Swinv2Config ## Swinv2Model [[transformers.Swinv2Model]] [[autodoc]] Swinv2Model - forward ## Swinv2ForMaskedImageModeling [[transformers.Swinv2ForMaskedImageModeling]] [[autodoc]] Swinv2ForMaskedImageModeling - forward ## Swinv2ForImageClassification [[transformers.Swinv2ForImageClassification]] [[autodoc]] transformers.Swinv2ForImageClassification - forward
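참고로, 아래는 [`Swinv2ForImageClassification`]을 추론에 사용하는 최소한의 예시 스케치입니다(체크포인트 `microsoft/swinv2-tiny-patch4-window8-256`은 하나의 예시로 가정한 것이며, 다른 Swin Transformer V2 체크포인트로 바꿔 사용할 수 있습니다):

```py
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

# 예시 이미지를 불러옵니다
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# 이미지 프로세서와 모델을 불러옵니다
processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# 가장 확률이 높은 ImageNet 클래스를 출력합니다
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```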
transformers/docs/source/ko/model_doc/swinv2.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/swinv2.md", "repo_id": "transformers", "token_count": 2648 }
46
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 단일 GPU에서 효율적인 추론 [[efficient-inference-on-a-single-gpu]] 이 가이드 외에도, [단일 GPU에서의 훈련 가이드](perf_train_gpu_one)와 [CPU에서의 추론 가이드](perf_infer_cpu)에서도 관련 정보를 찾을 수 있습니다. ## Better Transformer: PyTorch 네이티브 Transformer 패스트패스 [[better-transformer-pytorchnative-transformer-fastpath]] PyTorch 네이티브 [`nn.MultiHeadAttention`](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/) 어텐션 패스트패스인 BetterTransformer는 [🤗 Optimum 라이브러리](https://huggingface.co/docs/optimum/bettertransformer/overview)의 통합을 통해 Transformers와 함께 사용할 수 있습니다. PyTorch의 어텐션 패스트패스는 커널 퓨전과 [중첩된 텐서](https://pytorch.org/docs/stable/nested.html)의 사용을 통해 추론 속도를 높일 수 있습니다. 자세한 벤치마크는 [이 블로그 글](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2)에서 확인할 수 있습니다. [`optimum`](https://github.com/huggingface/optimum) 패키지를 설치한 후에는 추론 중 Better Transformer를 사용할 수 있도록 [`~PreTrainedModel.to_bettertransformer`]를 호출하여 관련 내부 모듈을 대체합니다: ```python model = model.to_bettertransformer() ``` [`~PreTrainedModel.reverse_bettertransformer`] 메소드는 정규화된 transformers 모델링을 사용하기 위해 모델을 저장하기 전 원래의 모델링으로 돌아갈 수 있도록 해줍니다: ```python model = model.reverse_bettertransformer() model.save_pretrained("saved_model") ``` PyTorch 2.0부터는 어텐션 패스트패스가 인코더와 디코더 모두에서 지원됩니다. 지원되는 아키텍처 목록은 [여기](https://huggingface.co/docs/optimum/bettertransformer/overview#supported-models)에서 확인할 수 있습니다. ## FP4 혼합 정밀도 추론을 위한 `bitsandbytes` 통합 [[bitsandbytes-integration-for-fp4-mixedprecision-inference]] `bitsandbytes`를 설치하면 GPU에서 손쉽게 모델을 압축할 수 있습니다. FP4 양자화를 사용하면 원래의 전체 정밀도 버전과 비교하여 모델 크기를 최대 8배 줄일 수 있습니다. 아래에서 시작하는 방법을 확인하세요. <Tip> 이 기능은 다중 GPU 설정에서도 사용할 수 있습니다. </Tip> ### 요구 사항 [[requirements-for-fp4-mixedprecision-inference]] - 최신 `bitsandbytes` 라이브러리 `pip install bitsandbytes>=0.39.0` - 최신 `accelerate`를 소스에서 설치 `pip install git+https://github.com/huggingface/accelerate.git` - 최신 `transformers`를 소스에서 설치 `pip install git+https://github.com/huggingface/transformers.git` ### FP4 모델 실행 - 단일 GPU 설정 - 빠른 시작 [[running-fp4-models-single-gpu-setup-quickstart]] 다음 코드를 실행하여 단일 GPU에서 빠르게 FP4 모델을 실행할 수 있습니다. ```py from transformers import AutoModelForCausalLM model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` `device_map`은 선택 사항입니다. 그러나 `device_map = 'auto'`로 설정하는 것이 사용 가능한 리소스를 효율적으로 디스패치하기 때문에 추론에 있어 권장됩니다. ### FP4 모델 실행 - 다중 GPU 설정 [[running-fp4-models-multi-gpu-setup]] 다중 GPU에서 혼합 4비트 모델을 가져오는 방법은 단일 GPU 설정과 동일합니다(동일한 명령어 사용): ```py model_name = "bigscience/bloom-2b5" model_4bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_4bit=True) ``` 하지만 `accelerate`를 사용하여 각 GPU에 할당할 GPU RAM을 제어할 수 있습니다. 
다음과 같이 `max_memory` 인수를 사용하세요:

```py
max_memory_mapping = {0: "600MB", 1: "1GB"}
model_name = "bigscience/bloom-3b"
model_4bit = AutoModelForCausalLM.from_pretrained(
    model_name, device_map="auto", load_in_4bit=True, max_memory=max_memory_mapping
)
```

이 예에서는 첫 번째 GPU가 600MB의 메모리를 사용하고 두 번째 GPU가 1GB를 사용합니다.

### 고급 사용법 [[advanced-usage]]

이 방법의 더 고급 사용법에 대해서는 [양자화](main_classes/quantization) 문서 페이지를 참조하세요.

## Int8 혼합 정밀도 행렬 분해를 위한 `bitsandbytes` 통합 [[bitsandbytes-integration-for-int8-mixedprecision-matrix-decomposition]]

<Tip>

이 기능은 다중 GPU 설정에서도 사용할 수 있습니다.

</Tip>

[`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339) 논문에서 소개된 방법에 따라, 우리는 몇 줄의 코드로 Hub의 모든 모델에 대한 Hugging Face 통합을 지원합니다. 이 방법은 `float16` 및 `bfloat16` 가중치에 대해 `nn.Linear` 크기를 2배로 줄이고, `float32` 가중치에 대해 4배로 줄입니다. 이는 절반 정밀도에서 이상치를 처리함으로써 품질에 거의 영향을 미치지 않습니다.

![HFxbitsandbytes.png](https://cdn-uploads.huggingface.co/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png)

Int8 혼합 정밀도 행렬 분해는 행렬 곱셈을 두 개의 스트림으로 분리합니다: (1) fp16으로 곱해지는 체계적인 특성 이상치(outlier) 스트림(0.01%), (2) int8 행렬 곱셈의 일반적인 스트림(99.9%). 이 방법을 사용하면 매우 큰 모델에 대해 예측 저하 없이 int8 추론이 가능합니다. 이 방법에 대한 자세한 내용은 [논문](https://arxiv.org/abs/2208.07339)이나 [통합에 관한 블로그 글](https://huggingface.co/blog/hf-bitsandbytes-integration)에서 확인할 수 있습니다.

![MixedInt8.gif](https://cdn-uploads.huggingface.co/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif)

커널은 GPU 전용으로 컴파일되어 있기 때문에 혼합 8비트 모델을 실행하려면 GPU가 필요합니다. 이 기능을 사용하기 전에 모델의 1/4(또는 모델 가중치가 절반 정밀도인 경우 절반)을 저장할 충분한 GPU 메모리가 있는지 확인하세요. 이 모듈을 사용하는 데 도움이 되는 몇 가지 참고 사항이 아래에 나와 있습니다. 또는 [Google colab](#colab-demos)에서 데모를 따라할 수도 있습니다.

### 요구 사항 [[requirements-for-int8-mixedprecision-matrix-decomposition]]

- `bitsandbytes<0.37.0`을 사용하는 경우, 8비트 텐서 코어(Turing, Ampere 또는 이후 아키텍처 - 예: T4, RTX20s RTX30s, A40-A100)를 지원하는 NVIDIA GPU에서 실행하는지 확인하세요. `bitsandbytes>=0.37.0`을 사용하는 경우, 모든 GPU가 지원됩니다.
- 올바른 버전의 `bitsandbytes`를 다음 명령으로 설치하세요: `pip install bitsandbytes>=0.31.5`
- `accelerate`를 설치하세요: `pip install accelerate>=0.12.0`

### 혼합 Int8 모델 실행 - 단일 GPU 설정 [[running-mixedint8-models-single-gpu-setup]]

필요한 라이브러리를 설치한 후 혼합 8비트 모델을 가져오는 방법은 다음과 같습니다:

```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
```

텍스트 생성의 경우:

* `pipeline()` 함수 대신 모델의 `generate()` 메소드를 사용하는 것을 권장합니다. `pipeline()` 함수로는 추론이 가능하지만, 혼합 8비트 모델에 최적화되지 않았기 때문에 `generate()` 메소드를 사용하는 것보다 느릴 수 있습니다. 또한, nucleus 샘플링과 같은 일부 샘플링 전략은 혼합 8비트 모델에 대해 `pipeline()` 함수에서 지원되지 않습니다.
* 입력을 모델과 동일한 GPU에 배치하는 것이 좋습니다.

다음은 간단한 예입니다:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "bigscience/bloom-2b5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True))

prompt = "Hello, my llama is cute"
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
generated_ids = model_8bit.generate(**inputs)
outputs = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
```

### 혼합 Int8 모델 실행 - 다중 GPU 설정 [[running-mixedint8-models-multi-gpu-setup]]

다중 GPU에서 혼합 8비트 모델을 로드하는 방법은 단일 GPU 설정과 동일합니다(동일한 명령어 사용):

```py
model_name = "bigscience/bloom-2b5"
model_8bit = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
```

하지만 `accelerate`를 사용하여 각 GPU에 할당할 GPU RAM을 제어할 수 있습니다.
다음과 같이 `max_memory` 인수를 사용하세요: ```py max_memory_mapping = {0: "1GB", 1: "2GB"} model_name = "bigscience/bloom-3b" model_8bit = AutoModelForCausalLM.from_pretrained( model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping ) ``` 이 예시에서는 첫 번째 GPU가 1GB의 메모리를 사용하고 두 번째 GPU가 2GB를 사용합니다. ### Colab 데모 [[colab-demos]] 이 방법을 사용하면 이전에 Google Colab에서 추론할 수 없었던 모델에 대해 추론할 수 있습니다. Google Colab에서 8비트 양자화를 사용하여 T5-11b(42GB in fp32)를 실행하는 데모를 확인하세요: [![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) 또는 BLOOM-3B에 대한 데모를 확인하세요: [![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing)
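참고로, 위에서 설명한 Int8 혼합 정밀도 행렬 분해의 핵심 아이디어(이상치 특성 차원은 높은 정밀도로, 나머지는 int8로 처리)는 다음과 같은 장난감 수준의 스케치로 개념적으로 나타낼 수 있습니다. `bitsandbytes`의 실제 구현이 아니라, 설명을 위해 가정한 코드입니다:

```py
import torch

def toy_llm_int8_matmul(x, w, threshold=6.0):
    # x: (토큰 수, 은닉 차원) 활성화, w: (은닉 차원, 출력 차원) 가중치
    # (1) 크기가 threshold를 넘는 이상치(outlier) 특성 차원을 찾습니다
    outlier = (x.abs() > threshold).any(dim=0)
    # (2) 이상치 차원은 높은 정밀도로 그대로 곱합니다 (fp16 스트림에 해당)
    y_outlier = x[:, outlier] @ w[outlier, :]
    # (3) 나머지 차원은 벡터 단위로 int8 양자화 후 곱하고 역양자화합니다
    x_r, w_r = x[:, ~outlier], w[~outlier, :]
    sx = x_r.abs().amax(dim=1, keepdim=True).clamp(min=1e-8) / 127.0
    sw = w_r.abs().amax(dim=0, keepdim=True).clamp(min=1e-8) / 127.0
    xq = (x_r / sx).round().clamp(-127, 127)
    wq = (w_r / sw).round().clamp(-127, 127)
    y_int8 = (xq @ wq) * (sx * sw)
    # 두 스트림의 결과를 합칩니다
    return y_outlier + y_int8

x = torch.randn(4, 16)
x[:, 3] *= 10  # 인위적으로 이상치 차원을 만듭니다
w = torch.randn(16, 8)
print((toy_llm_int8_matmul(x, w) - x @ w).abs().max())  # 근사 오차 확인
```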
transformers/docs/source/ko/perf_infer_gpu_one.md/0
{ "file_path": "transformers/docs/source/ko/perf_infer_gpu_one.md", "repo_id": "transformers", "token_count": 6517 }
47
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPTQ [[gptq]] <Tip> PEFT를 활용한 GPTQ 양자화를 사용해보시려면 이 [노트북](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb)을 참고하시고, 자세한 내용은 이 [블로그 게시물](https://huggingface.co/blog/gptq-integration)에서 확인하세요! </Tip> [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) 라이브러리는 GPTQ 알고리즘을 구현합니다. 이는 훈련 후 양자화 기법으로, 가중치 행렬의 각 행을 독립적으로 양자화하여 오차를 최소화하는 가중치 버전을 찾습니다. 이 가중치는 int4로 양자화되지만, 추론 중에는 실시간으로 fp16으로 복원됩니다. 이는 int4 가중치가 GPU의 전역 메모리 대신 결합된 커널에서 역양자화되기 때문에 메모리 사용량을 4배 절약할 수 있으며, 더 낮은 비트 너비를 사용함으로써 통신 시간이 줄어들어 추론 속도가 빨라질 것으로 기대할 수 있습니다. 시작하기 전에 다음 라이브러리들이 설치되어 있는지 확인하세요: ```bash pip install auto-gptq pip install --upgrade accelerate optimum transformers ``` 모델을 양자화하려면(현재 텍스트 모델만 지원됨) [`GPTQConfig`] 클래스를 생성하고 양자화할 비트 수, 양자화를 위한 가중치 교정 데이터셋, 그리고 데이터셋을 준비하기 위한 토크나이저를 설정해야 합니다. ```py from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig model_id = "facebook/opt-125m" tokenizer = AutoTokenizer.from_pretrained(model_id) gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer) ``` 자신의 데이터셋을 문자열 리스트 형태로 전달할 수도 있지만, GPTQ 논문에서 사용한 동일한 데이터셋을 사용하는 것을 강력히 권장합니다. ```py dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."] gptq_config = GPTQConfig(bits=4, dataset=dataset, tokenizer=tokenizer) ``` 양자화할 모델을 로드하고 `gptq_config`을 [`~AutoModelForCausalLM.from_pretrained`] 메소드에 전달하세요. 모델을 메모리에 맞추기 위해 `device_map="auto"`를 설정하여 모델을 자동으로 CPU로 오프로드하고, 양자화를 위해 모델 모듈이 CPU와 GPU 간에 이동할 수 있도록 합니다. ```py quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config) ``` 데이터셋이 너무 커서 메모리가 부족한 경우를 대비한 디스크 오프로드는 현재 지원하지 않고 있습니다. 이럴 때는 `max_memory` 매개변수를 사용하여 디바이스(GPU 및 CPU)에서 사용할 메모리 양을 할당해 보세요: ```py quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", max_memory={0: "30GiB", 1: "46GiB", "cpu": "30GiB"}, quantization_config=gptq_config) ``` <Tip warning={true}> 하드웨어와 모델 매개변수량에 따라 모델을 처음부터 양자화하는 데 드는 시간이 서로 다를 수 있습니다. 예를 들어, 무료 등급의 Google Colab GPU로 비교적 가벼운 [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) 모델을 양자화하는 데 약 5분이 걸리지만, NVIDIA A100으로 175B에 달하는 매개변수를 가진 모델을 양자화하는 데는 약 4시간에 달하는 시간이 걸릴 수 있습니다. 모델을 양자화하기 전에, Hub에서 해당 모델의 GPTQ 양자화 버전이 이미 존재하는지 확인하는 것이 좋습니다. </Tip> 모델이 양자화되면, 모델과 토크나이저를 Hub에 푸시하여 쉽게 공유하고 접근할 수 있습니다. [`GPTQConfig`]를 저장하기 위해 [`~PreTrainedModel.push_to_hub`] 메소드를 사용하세요: ```py quantized_model.push_to_hub("opt-125m-gptq") tokenizer.push_to_hub("opt-125m-gptq") ``` 양자화된 모델을 로컬에 저장하려면 [`~PreTrainedModel.save_pretrained`] 메소드를 사용할 수 있습니다. 모델이 `device_map` 매개변수로 양자화되었을 경우, 저장하기 전에 전체 모델을 GPU나 CPU로 이동해야 합니다. 
예를 들어, 모델을 CPU에 저장하려면 다음과 같이 합니다:

```py
quantized_model.save_pretrained("opt-125m-gptq")
tokenizer.save_pretrained("opt-125m-gptq")

# device_map이 설정된 상태에서 양자화된 경우
quantized_model.to("cpu")
quantized_model.save_pretrained("opt-125m-gptq")
```

양자화된 모델을 다시 로드하려면 [`~PreTrainedModel.from_pretrained`] 메소드를 사용하고, `device_map="auto"`를 설정하여 모든 사용 가능한 GPU에 모델을 자동으로 분산시켜 더 많은 메모리를 사용하지 않으면서 모델을 더 빠르게 로드할 수 있습니다.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto")
```

## ExLlama [[exllama]]

[ExLlama](https://github.com/turboderp/exllama)는 [Llama](model_doc/llama) 모델의 Python/C++/CUDA 구현체로, 4비트 GPTQ 가중치를 사용하여 더 빠른 추론을 할 수 있도록 설계되었습니다(이 [벤치마크](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark)를 참고하세요). [`GPTQConfig`] 객체를 생성할 때 ExLlama 커널이 기본적으로 활성화됩니다. 추론 속도를 더욱 높이기 위해, `exllama_config` 매개변수를 구성하여 [ExLlamaV2](https://github.com/turboderp/exllamav2) 커널을 사용할 수 있습니다:

```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig

gptq_config = GPTQConfig(bits=4, exllama_config={"version":2})
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config=gptq_config)
```

<Tip warning={true}>

4비트 모델만 지원되며, 양자화된 모델을 PEFT로 미세 조정하는 경우 ExLlama 커널을 비활성화할 것을 권장합니다.

</Tip>

ExLlama 커널은 전체 모델이 GPU에 있을 때만 지원됩니다. AutoGPTQ(버전 0.4.2 이상)로 CPU에서 추론을 수행하는 경우 ExLlama 커널을 비활성화해야 합니다. 이를 위해 config.json 파일의 양자화 설정에서 ExLlama 커널과 관련된 속성을 덮어써야 합니다.

```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig

gptq_config = GPTQConfig(bits=4, use_exllama=False)
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="cpu", quantization_config=gptq_config)
```
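참고로, GPTQ가 다루는 4비트 양자화/역양자화의 기본 그림은 아래와 같은 단순한 스케치로 이해할 수 있습니다. 실제 GPTQ는 단순 반올림이 아니라 헤시안 정보를 활용해 행 단위로 양자화 오차를 보정한다는 점이 다르며, 아래 코드는 설명을 위해 가정한 라운드-투-니어리스트(RTN) 예시입니다:

```py
import torch

def quantize_int4_rtn(w_row):
    # 행 단위 비대칭 4비트 양자화: 값을 16개 레벨(0~15)로 반올림합니다
    wmin, wmax = w_row.min(), w_row.max()
    scale = (wmax - wmin).clamp(min=1e-8) / 15.0
    zero = (-wmin / scale).round()
    q = ((w_row / scale) + zero).round().clamp(0, 15).to(torch.uint8)
    return q, scale, zero

def dequantize_int4(q, scale, zero):
    # 추론 시 int4 가중치가 실시간으로 fp16/fp32로 복원되는 단계에 해당합니다
    return (q.float() - zero) * scale

w = torch.randn(8)
q, scale, zero = quantize_int4_rtn(w)
print(w)
print(dequantize_int4(q, scale, zero))  # 원본에 가까운 근사값
```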
transformers/docs/source/ko/quantization/gptq.md/0
{ "file_path": "transformers/docs/source/ko/quantization/gptq.md", "repo_id": "transformers", "token_count": 4640 }
48
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 인과 언어 모델링[[causal-language-modeling]] [[open-in-colab]] 언어 모델링은 인과적 언어 모델링과 마스크드 언어 모델링, 두 가지 유형으로 나뉩니다. 이 가이드에서는 인과적 언어 모델링을 설명합니다. 인과 언어 모델은 텍스트 생성에 자주 사용됩니다. 또 창의적인 방향으로 응용할 수 있습니다. 직접 사용하며 재미있는 탐구를 해보거나, Copilot 또는 CodeParrot와 같은 지능형 코딩 어시스턴트의 기반이 되기도 합니다. <Youtube id="Vpjb1lu0MDk"/> 인과 언어 모델링은 토큰 시퀀스에서 다음 토큰을 예측하며, 모델은 왼쪽의 토큰에만 접근할 수 있습니다. 이는 모델이 미래의 토큰을 볼 수 없다는 것을 의미합니다. 인과 언어 모델의 예로 GPT-2가 있죠. 이 가이드에서는 다음 작업을 수행하는 방법을 안내합니다: 1. [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) 모델을 [ELI5](https://huggingface.co/datasets/eli5) 데이터 세트의 [r/askscience](https://www.reddit.com/r/askscience/) 하위 집합으로 미세 조정 2. 미세 조정된 모델을 추론에 사용 <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/text-generation)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate ``` 커뮤니티에 모델을 업로드하고 공유하기 위해 Hugging Face 계정에 로그인하는 것을 권장합니다. 알림이 표시되면 토큰을 입력하여 로그인하세요: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## ELI5 데이터 세트 불러오기[[load-eli5-dataset]] 먼저, 🤗 Datasets 라이브러리에서 r/askscience의 작은 하위 집합인 ELI5 데이터 세트를 불러옵니다. 이를 통해 전체 데이터 세트에서 학습하는 데 더 많은 시간을 투자하기 전에, 실험해봄으로써 모든 것이 작동하는지 확인할 수 있습니다. ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("eli5", split="train_asks[:5000]") ``` 데이터 세트의 `train_asks` 분할을 [`~datasets.Dataset.train_test_split`] 메소드를 사용하여 학습 및 테스트 세트로 분할합니다: ```py >>> eli5 = eli5.train_test_split(test_size=0.2) ``` 그런 다음 예제를 살펴보세요: ```py >>> eli5["train"][0] {'answers': {'a_id': ['c3d1aib', 'c3d4lya'], 'score': [6, 3], 'text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"]}, 'answers_urls': {'url': []}, 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls': {'url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg']}, 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls': {'url': []}} ``` 많아 보일 수 있지만, 실제로는 `text` 필드만 중요합니다. 언어 모델링 작업의 장점은 레이블이 필요하지 않다는 것입니다. 다음 단어 *자체가* 레이블입니다. (이렇게 레이블을 제공하지 않아도 되는 학습을 비지도 학습이라고 일컫습니다) ## 전처리[[preprocess]] <Youtube id="ma1TrR7gE7I"/> 다음 단계는 `text` 필드를 전처리하기 위해 DistilGPT2 토크나이저를 불러오는 것입니다. ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") ``` 위의 예제에서 알 수 있듯이, `text` 필드는 `answers` 아래에 중첩되어 있습니다. 따라서 [`flatten`](https://huggingface.co/docs/datasets/process#flatten) 메소드를 사용하여 중첩 구조에서 `text` 하위 필드를 추출해야 합니다. ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'answers.a_id': ['c3d1aib', 'c3d4lya'], 'answers.score': [6, 3], 'answers.text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"], 'answers_urls.url': [], 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls.url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg'], 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls.url': []} ``` 각 하위 필드는 이제 `answers` 접두사를 가진 별도의 열로 나뉘었으며, `text` 필드는 이제 리스트입니다. 각 문장을 개별적으로 토큰화하는 대신, 먼저 리스트를 문자열로 변환하여 한꺼번에 토큰화할 수 있습니다. 다음은 문자열 리스트를 결합하고 결과를 토큰화하는 첫 번째 전처리 함수입니다: ```py >>> def preprocess_function(examples): ... return tokenizer([" ".join(x) for x in examples["answers.text"]]) ``` 이 전처리 함수를 전체 데이터 세트에 적용하려면 🤗 Datasets [`~datasets.Dataset.map`] 메소드를 사용하세요. `batched=True`로 설정하여 데이터셋의 여러 요소를 한 번에 처리하고, `num_proc`를 증가시켜 프로세스 수를 늘릴 수 있습니다. 필요 없는 열은 제거하세요: ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... remove_columns=eli5["train"].column_names, ... ) ``` 이제 데이터 세트는 시퀀스가 토큰화됐지만, 일부 시퀀스는 모델의 최대 입력 길이보다 길 수 있습니다. 이제 두 번째 전처리 함수를 사용하여 - 모든 시퀀스를 연결하고, - `block_size`로 정의된 길이로 연결된 시퀀스를 여러 개의 짧은 묶음으로 나눕니다. 이 값은 최대 입력 길이와 GPU RAM을 고려해 충분히 짧아야 합니다. ```py >>> block_size = 128 >>> def group_texts(examples): ... # Concatenate all texts. ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can ... # customize this part to your needs. ... 
if total_length >= block_size: ... total_length = (total_length // block_size) * block_size ... # Split by chunks of block_size. ... result = { ... k: [t[i : i + block_size] for i in range(0, total_length, block_size)] ... for k, t in concatenated_examples.items() ... } ... result["labels"] = result["input_ids"].copy() ... return result ``` 전체 데이터 세트에 `group_texts` 함수를 적용하세요: ```py >>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4) ``` 그런 다음 [`DataCollatorForLanguageModeling`]을 사용하여 예제의 배치를 만듭니다. 데이터 세트 전체를 최대 길이로 패딩하는 것보다, 취합 단계에서 각 배치의 최대 길이로 문장을 *동적으로 패딩*하는 것이 더 효율적입니다. <frameworkcontent> <pt> 패딩 토큰으로 종결 토큰을 사용하고 `mlm=False`로 설정하세요. 이렇게 하면 입력을 오른쪽으로 한 칸씩 시프트한 값을 레이블로 사용합니다: ```py >>> from transformers import DataCollatorForLanguageModeling >>> tokenizer.pad_token = tokenizer.eos_token >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) ``` </pt> <tf> 패딩 토큰으로 종결 토큰을 사용하고 `mlm=False`로 설정하세요. 이렇게 하면 입력을 오른쪽으로 한 칸씩 시프트한 값을 레이블로 사용합니다: ```py >>> from transformers import DataCollatorForLanguageModeling >>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf") ``` </tf> </frameworkcontent> ## 훈련[[train]] <frameworkcontent> <pt> <Tip> [`Trainer`]를 사용하여 모델을 미세 조정하는 방법을 잘 모르신다면 [기본 튜토리얼](../training#train-with-pytorch-trainer)을 확인해보세요! </Tip> 이제 모델을 훈련하기 준비가 되었습니다! [`AutoModelForCausalLM`]를 사용하여 DistilGPT2를 불러옵니다: ```py >>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` 여기까지 진행하면 세 단계만 남았습니다: 1. [`TrainingArguments`]에서 훈련 하이퍼파라미터를 정의하세요. `output_dir`은 유일한 필수 매개변수로, 모델을 저장할 위치를 지정합니다. (먼저 Hugging Face에 로그인 필수) `push_to_hub=True`로 설정하여 이 모델을 허브에 업로드할 수 있습니다. 2. 훈련 인수를 [`Trainer`]에 모델, 데이터 세트 및 데이터 콜레이터와 함께 전달하세요. 3. [`~Trainer.train`]을 호출하여 모델을 미세 조정하세요. ```py >>> training_args = TrainingArguments( ... output_dir="my_awesome_eli5_clm-model", ... eval_strategy="epoch", ... learning_rate=2e-5, ... weight_decay=0.01, ... push_to_hub=True, ... ) >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=lm_dataset["train"], ... eval_dataset=lm_dataset["test"], ... data_collator=data_collator, ... ) >>> trainer.train() ``` 훈련이 완료되면 [`~transformers.Trainer.evaluate`] 메소드를 사용하여 모델을 평가하고 퍼플렉서티를 얻을 수 있습니다: ```py >>> import math >>> eval_results = trainer.evaluate() >>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}") Perplexity: 49.61 ``` 그런 다음 [`~transformers.Trainer.push_to_hub`] 메소드를 사용하여 모델을 허브에 공유하세요. 이렇게 하면 누구나 모델을 사용할 수 있습니다: ```py >>> trainer.push_to_hub() ``` </pt> <tf> <Tip> Keras를 사용하여 모델을 미세 조정하는 방법에 익숙하지 않다면 [기본 튜토리얼](../training#train-a-tensorflow-model-with-keras)을 확인해보세요! </Tip> TensorFlow에서 모델을 미세 조정하려면, 먼저 옵티마이저 함수, 학습률 스케줄 및 일부 훈련 하이퍼파라미터를 설정하세요: ```py >>> from transformers import create_optimizer, AdamWeightDecay >>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) ``` 그런 다음 [`TFAutoModelForCausalLM`]를 사용하여 DistilGPT2를 불러옵니다: ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") ``` [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]을 사용하여 데이터 세트를 `tf.data.Dataset` 형식으로 변환하세요: ```py >>> tf_train_set = model.prepare_tf_dataset( ... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) >>> tf_test_set = model.prepare_tf_dataset( ... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... 
collate_fn=data_collator,
... )
```

[`compile`](https://keras.io/api/models/model_training_apis/#compile-method)을 사용하여 모델을 훈련하기 위해 구성하세요. Transformers 모델은 모두 기본적인 작업 관련 손실 함수를 가지고 있으므로, 원한다면 별도로 지정하지 않아도 됩니다:

```py
>>> import tensorflow as tf

>>> model.compile(optimizer=optimizer)  # 별도로 loss 인자를 넣지 않았어요!
```

[`~transformers.PushToHubCallback`]에서 모델과 토크나이저를 업로드할 위치를 지정할 수 있습니다:

```py
>>> from transformers.keras_callbacks import PushToHubCallback

>>> callback = PushToHubCallback(
...     output_dir="my_awesome_eli5_clm-model",
...     tokenizer=tokenizer,
... )
```

마지막으로, 모델을 훈련하기 위해 [`fit`](https://keras.io/api/models/model_training_apis/#fit-method)을 호출하세요. 훈련 데이터 세트, 검증 데이터 세트, 에폭 수 및 콜백을 전달하세요:

```py
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback])
```

훈련이 완료되면 모델이 자동으로 허브에 업로드되어 모두가 사용할 수 있습니다!

</tf>
</frameworkcontent>

<Tip>

인과 언어 모델링을 위해 모델을 미세 조정하는 더 자세한 예제는 해당하는 [PyTorch 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) 또는 [TensorFlow 노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)을 참조하세요.

</Tip>

## 추론[[inference]]

좋아요, 이제 모델을 미세 조정했으므로 추론에 사용할 수 있습니다! 생성할 텍스트를 위한 프롬프트를 만들어보세요:

```py
>>> prompt = "Somatic hypermutation allows the immune system to"
```

미세 조정된 모델을 추론에 사용하는 가장 간단한 방법은 [`pipeline`]에서 사용하는 것입니다. 모델과 함께 텍스트 생성을 위한 `pipeline`을 인스턴스화하고 텍스트를 전달하세요:

```py
>>> from transformers import pipeline

>>> generator = pipeline("text-generation", model="my_awesome_eli5_clm-model")
>>> generator(prompt)
[{'generated_text': "Somatic hypermutation allows the immune system to be able to effectively reverse the damage caused by an infection.\n\n\nThe damage caused by an infection is caused by the immune system's ability to perform its own self-correcting tasks."}]
```

<frameworkcontent>
<pt>

텍스트를 토큰화하고 `input_ids`를 PyTorch 텐서로 반환하세요:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_eli5_clm-model")
>>> inputs = tokenizer(prompt, return_tensors="pt").input_ids
```

[`~generation.GenerationMixin.generate`] 메소드를 사용하여 텍스트를 생성하세요. 생성을 제어하는 다양한 텍스트 생성 전략과 매개변수에 대한 자세한 내용은 [텍스트 생성 전략](../generation_strategies) 페이지를 확인하세요.

```py
>>> from transformers import AutoModelForCausalLM

>>> model = AutoModelForCausalLM.from_pretrained("my_awesome_eli5_clm-model")
>>> outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
```

생성된 토큰 ID를 다시 텍스트로 디코딩하세요:

```py
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["Somatic hypermutation allows the immune system to react to drugs with the ability to adapt to a different environmental situation. In other words, a system of 'hypermutation' can help the immune system to adapt to a different environmental situation or in some cases even a single life. In contrast, researchers at the University of Massachusetts-Boston have found that 'hypermutation' is much stronger in mice than in humans but can be found in humans, and that it's not completely unknown to the immune system. A study on how the immune system"]
```

</pt>
<tf>

텍스트를 토큰화하고 `input_ids`를 TensorFlow 텐서로 반환하세요:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_eli5_clm-model")
>>> inputs = tokenizer(prompt, return_tensors="tf").input_ids
```

[`~transformers.generation_tf_utils.TFGenerationMixin.generate`] 메소드를 사용하여 텍스트를 생성하세요.
생성을 제어하는 다양한 텍스트 생성 전략과 매개변수에 대한 자세한 내용은 [텍스트 생성 전략](../generation_strategies) 페이지를 확인하세요. ```py >>> from transformers import TFAutoModelForCausalLM >>> model = TFAutoModelForCausalLM.from_pretrained("my_awesome_eli5_clm-model") >>> outputs = model.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95) ``` 생성된 토큰 ID를 다시 텍스트로 디코딩하세요: ```py >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Somatic hypermutation allows the immune system to detect the presence of other viruses as they become more prevalent. Therefore, researchers have identified a high proportion of human viruses. The proportion of virus-associated viruses in our study increases with age. Therefore, we propose a simple algorithm to detect the presence of these new viruses in our samples as a sign of improved immunity. A first study based on this algorithm, which will be published in Science on Friday, aims to show that this finding could translate into the development of a better vaccine that is more effective for'] ``` </tf> </frameworkcontent>
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Zero-shot object detection[[zeroshot-object-detection]]

[[open-in-colab]]

Models typically used for [object detection](object_detection) require labeled image datasets for training, and they are limited to detecting only the classes (labels) present in the training data.

The [OWL-ViT](../model_doc/owlvit) model takes a different approach and makes zero-shot object detection possible. OWL-ViT is an open-vocabulary object detector: it can detect objects in images based on free-text queries without fine-tuning the model on labeled datasets.

OWL-ViT leverages multi-modal representations to perform open-vocabulary detection. It combines [CLIP](../model_doc/clip) with lightweight object classification and localization heads. Open-vocabulary detection is achieved by embedding free-text queries with CLIP's text encoder and using them as input to the object classification and localization heads, while a ViT processes image patches as inputs, associating images with their corresponding text descriptions. The authors of OWL-ViT first trained CLIP from scratch and then fine-tuned OWL-ViT on standard object detection datasets using a bipartite matching loss. With this approach, the model can detect objects based on textual descriptions without prior training on labeled datasets.

In this guide, you will learn how to use OWL-ViT:
- to detect objects based on text prompts
- for batch object detection
- for image-guided object detection

Before you begin, make sure you have all the necessary libraries installed:

```bash
pip install -q transformers
```

## Zero-shot object detection pipeline[[zeroshot-object-detection-pipeline]]

The simplest way to try out inference with OWL-ViT is to use it in a [`pipeline`]. Instantiate a pipeline for zero-shot object detection from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads):

```python
>>> from transformers import pipeline

>>> checkpoint = "google/owlvit-base-patch32"
>>> detector = pipeline(model=checkpoint, task="zero-shot-object-detection")
```

Next, choose an image you'd like to detect objects in. Here we'll use the image of astronaut Eileen Collins that is part of the [NASA](https://www.nasa.gov/multimedia/imagegallery/index.html) Great Images dataset:

```py
>>> import skimage
>>> import numpy as np
>>> from PIL import Image

>>> image = skimage.data.astronaut()
>>> image = Image.fromarray(np.uint8(image)).convert("RGB")

>>> image
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_1.png" alt="Astronaut Eileen Collins"/>
</div>

Pass the image and the candidate object labels to look for to the pipeline. Here we pass the image directly; other suitable options include a local path to an image or an image url. The `candidate_labels` can be simple words like in this example, or more descriptive phrases. We pass text descriptions for every item we want to query the image for:

```py
>>> predictions = detector(
...     image,
...     candidate_labels=["human face", "rocket", "nasa badge", "star-spangled banner"],
... )
>>> predictions
[{'score': 0.3571370542049408, 'label': 'human face', 'box': {'xmin': 180, 'ymin': 71, 'xmax': 271, 'ymax': 178}},
 {'score': 0.28099656105041504, 'label': 'nasa badge', 'box': {'xmin': 129, 'ymin': 348, 'xmax': 206, 'ymax': 427}},
 {'score': 0.2110239565372467, 'label': 'rocket', 'box': {'xmin': 350, 'ymin': -1, 'xmax': 468, 'ymax': 288}},
 {'score': 0.13790413737297058, 'label': 'star-spangled banner', 'box': {'xmin': 1, 'ymin': 1, 'xmax': 105, 'ymax': 509}},
 {'score': 0.11950037628412247, 'label': 'nasa badge', 'box': {'xmin': 277, 'ymin': 338, 'xmax': 327, 'ymax': 380}},
 {'score': 0.10649408400058746, 'label': 'rocket', 'box': {'xmin': 358, 'ymin': 64, 'xmax': 424, 'ymax': 280}}]
```

Let's visualize the predictions:

```py
>>> from PIL import ImageDraw

>>> draw = ImageDraw.Draw(image)

>>> for prediction in predictions:
...     box = prediction["box"]
...     label = prediction["label"]
...     score = prediction["score"]

...     xmin, ymin, xmax, ymax = box.values()
...     draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1)
...     draw.text((xmin, ymin), f"{label}: {round(score,2)}", fill="white")

>>> image
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_2.png" alt="Visualized predictions on NASA image"/>
</div>

## Text-prompted zero-shot object detection by hand[[textprompted-zeroshot-object-detection-by-hand]]

Now that you've seen how to use the zero-shot object detection pipeline, let's replicate the same result manually.

Start by loading the model and the associated processor from a [checkpoint on the Hugging Face Hub](https://huggingface.co/models?other=owlvit). Here we'll use the same checkpoint as before:

```py
>>> from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

>>> model = AutoModelForZeroShotObjectDetection.from_pretrained(checkpoint)
>>> processor = AutoProcessor.from_pretrained(checkpoint)
```

Let's use a different image this time:

```py
>>> import requests

>>> url = "https://unsplash.com/photos/oj0zeY2Ltk4/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MTR8fHBpY25pY3xlbnwwfHx8fDE2Nzc0OTE1NDk&force=true&w=640"
>>> im = Image.open(requests.get(url, stream=True).raw)
>>> im
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_3.png" alt="Beach photo"/>
</div>

Use the processor to prepare the inputs for the model. The processor combines an image processor, which prepares the image for the model by resizing and normalizing it, and a [`CLIPTokenizer`], which takes care of the text inputs:

```py
>>> text_queries = ["hat", "book", "sunglasses", "camera"]
>>> inputs = processor(text=text_queries, images=im, return_tensors="pt")
```

Pass the inputs through the model, then post-process and visualize the results. Since the image processor resized the image before feeding it to the model, you need to use the [`~OwlViTImageProcessor.post_process_object_detection`] method to make sure the predicted bounding boxes have the correct coordinates relative to the original image:

```py
>>> import torch

>>> with torch.no_grad():
...     outputs = model(**inputs)
...     target_sizes = torch.tensor([im.size[::-1]])
...     results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)[0]

>>> draw = ImageDraw.Draw(im)

>>> scores = results["scores"].tolist()
>>> labels = results["labels"].tolist()
>>> boxes = results["boxes"].tolist()

>>> for box, score, label in zip(boxes, scores, labels):
...     xmin, ymin, xmax, ymax = box
...     draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1)
...     draw.text((xmin, ymin), f"{text_queries[label]}: {round(score,2)}", fill="white")

>>> im
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_4.png" alt="Beach photo with detected objects"/>
</div>

## Batch processing[[batch-processing]]

You can pass multiple sets of images and text queries to search for different (or the same) objects in several images. For batch processing, pass the text queries as a nested list and the images as a list of PIL images, PyTorch tensors, or NumPy arrays to the processor:

```py
>>> images = [image, im]
>>> text_queries = [
...     ["human face", "rocket", "nasa badge", "star-spangled banner"],
...     ["hat", "book", "sunglasses", "camera"],
... ]
>>> inputs = processor(text=text_queries, images=images, return_tensors="pt")
```

Previously for post-processing you passed the single image's size as a tensor, but you can also pass a tuple, or, in the case of several images, a list of tuples. Let's create predictions for the two examples and visualize the second one (`image_idx = 1`):

```py
>>> with torch.no_grad():
...     outputs = model(**inputs)
...     target_sizes = [x.size[::-1] for x in images]
...     results = processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=target_sizes)

>>> image_idx = 1
>>> draw = ImageDraw.Draw(images[image_idx])

>>> scores = results[image_idx]["scores"].tolist()
>>> labels = results[image_idx]["labels"].tolist()
>>> boxes = results[image_idx]["boxes"].tolist()

>>> for box, score, label in zip(boxes, scores, labels):
...     xmin, ymin, xmax, ymax = box
...     draw.rectangle((xmin, ymin, xmax, ymax), outline="red", width=1)
...     draw.text((xmin, ymin), f"{text_queries[image_idx][label]}: {round(score,2)}", fill="white")

>>> images[image_idx]
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_4.png" alt="Beach photo with detected objects"/>
</div>

## Image-guided object detection[[imageguided-object-detection]]

In addition to zero-shot object detection with text queries, OWL-ViT offers image-guided object detection. This means you can use an image query to find similar objects in a target image. Unlike with text queries, only a single example image is allowed.

Let's take an image with two cats on a couch as the target image, and an image of a single cat as the query:

```py
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image_target = Image.open(requests.get(url, stream=True).raw)

>>> query_url = "http://images.cocodataset.org/val2017/000000524280.jpg"
>>> query_image = Image.open(requests.get(query_url, stream=True).raw)
```

Let's take a quick look at the two images:

```py
>>> import matplotlib.pyplot as plt

>>> fig, ax = plt.subplots(1, 2)
>>> ax[0].imshow(image_target)
>>> ax[1].imshow(query_image)
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_5.png" alt="Cats"/>
</div>

In the preprocessing step, instead of text queries you now use `query_images`:

```py
>>> inputs = processor(images=image_target, query_images=query_image, return_tensors="pt")
```

For predictions, instead of passing the inputs to the model, pass them to [`~OwlViTForObjectDetection.image_guided_detection`]. Visualize the predictions as before, except this time there are no labels:

```py
>>> with torch.no_grad():
...     outputs = model.image_guided_detection(**inputs)
...     target_sizes = torch.tensor([image_target.size[::-1]])
...     results = processor.post_process_image_guided_detection(outputs=outputs, target_sizes=target_sizes)[0]

>>> draw = ImageDraw.Draw(image_target)

>>> scores = results["scores"].tolist()
>>> boxes = results["boxes"].tolist()

>>> for box, score in zip(boxes, scores):
...     xmin, ymin, xmax, ymax = box
...     draw.rectangle((xmin, ymin, xmax, ymax), outline="white", width=4)

>>> image_target
```

<div class="flex justify-center">
     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/zero-sh-obj-detection_6.png" alt="Cats with bounding boxes"/>
</div>

To try out inference with OWL-ViT interactively, check out this demo:

<iframe
	src="https://adirik-owl-vit.hf.space"
	frameborder="0"
	width="850"
	height="450"
></iframe>
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Converting TensorFlow checkpoints to PyTorch

A command-line interface is provided to convert the original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints into models that can be loaded using the library's `from_pretrained` methods.

<Tip>

Since version 2.3.0 the conversion script is part of the transformers CLI (**transformers-cli**), available in any transformers installation >= 2.3.0. The documentation below reflects the **transformers-cli convert** command format.

</Tip>

## BERT

You can convert any TensorFlow checkpoint for BERT (in particular [the pre-trained models released by Google](https://github.com/google-research/bert#pre-trained-models)) into a PyTorch save file by using the [convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py) script.

This CLI takes as input a TensorFlow checkpoint (three files starting with `bert_model.ckpt`) and the associated configuration file (`bert_config.json`), then creates a PyTorch model for this configuration, loads the weights from the TensorFlow checkpoint into the PyTorch model, and saves the resulting model in a standard PyTorch save file that can be imported using `from_pretrained()` (see the example in [quicktour](quicktour) and [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py)).

You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow checkpoint (the three files starting with `bert_model.ckpt`), but be sure to keep the configuration file (`bert_config.json`) and the vocabulary file (`vocab.txt`), as these are also needed for the PyTorch model.

To run this specific conversion script you will need TensorFlow and PyTorch installed (`pip install tensorflow`). The rest of the repository only requires PyTorch.

Here is an example of the conversion process for a pre-trained `BERT-Base Uncased` model:

```bash
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

transformers-cli convert --model_type bert \
  --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
  --config $BERT_BASE_DIR/bert_config.json \
  --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/bert#pre-trained-models).
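
The converted checkpoint can then be loaded like any other local PyTorch model. Here is a minimal loading sketch; it assumes you keep `pytorch_model.bin` (created by the conversion above) in the same directory as the configuration, renamed from `bert_config.json` to `config.json` so that `from_pretrained` can find it:

```py
from transformers import BertModel

# The directory layout is an assumption: it must contain config.json
# (the renamed bert_config.json) and pytorch_model.bin from the conversion.
model = BertModel.from_pretrained("/path/to/bert/uncased_L-12_H-768_A-12")
```
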
## ALBERT

Convert TensorFlow model checkpoints of ALBERT to PyTorch using the [convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py) script.

The CLI takes as input a TensorFlow checkpoint (three files starting with `model.ckpt-best`) and the accompanying configuration file (`albert_config.json`), then creates and saves a PyTorch model. To run this conversion you will need TensorFlow and PyTorch installed.

Here is an example of the conversion process for the pre-trained `ALBERT Base` model:

```bash
export ALBERT_BASE_DIR=/path/to/albert/albert_base

transformers-cli convert --model_type albert \
  --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
  --config $ALBERT_BASE_DIR/albert_config.json \
  --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/albert#pre-trained-models).

## OpenAI GPT

Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint was saved in the same format as the OpenAI pre-trained model (see [here](https://github.com/openai/finetune-transformer-lm)):

```bash
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

transformers-cli convert --model_type gpt \
  --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT_CONFIG] \
  [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK]
```

## OpenAI GPT-2

Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see [here](https://github.com/openai/gpt-2)):

```bash
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/openai-community/gpt2/pretrained/weights

transformers-cli convert --model_type gpt2 \
  --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT2_CONFIG] \
  [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
```

## XLNet

Here is an example of the conversion process for a pre-trained XLNet model:

```bash
export XLNET_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
export XLNET_CONFIG_PATH=/path/to/xlnet/config

transformers-cli convert --model_type xlnet \
  --tf_checkpoint $XLNET_CHECKPOINT_PATH \
  --config $XLNET_CONFIG_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--finetuning_task_name XLNET_FINETUNED_TASK]
```

## XLM

Here is an example of the conversion process for a pre-trained XLM model:

```bash
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

transformers-cli convert --model_type xlm \
  --tf_checkpoint $XLM_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config XLM_CONFIG] \
  [--finetuning_task_name XLM_FINETUNED_TASK]
```

## T5

Here is an example of the conversion process for a pre-trained T5 model:

```bash
export T5=/path/to/t5/checkpoint

transformers-cli convert --model_type t5 \
  --tf_checkpoint $T5/t5_model.ckpt \
  --config $T5/t5_config.json \
  --pytorch_dump_output $T5/pytorch_model.bin
```
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Quick tour

[[open-in-colab]]

Get up and running with 🤗 Transformers! Whether you're a developer or an everyday user, this quick tour will help you get started and show you how to use the [`pipeline`] for inference, load a pretrained model and preprocessor with an [AutoClass](./model_doc/auto), and quickly train a model with PyTorch or TensorFlow. If you're a beginner, we recommend checking out our tutorials or [course](https://huggingface.co/course/chapter1/1) next for more in-depth explanations of the concepts introduced here.

Before you begin, make sure you have all the necessary libraries installed:

```bash
!pip install transformers datasets evaluate accelerate
```

You'll also need to install your preferred machine learning framework:

<frameworkcontent>
<pt>

```bash
pip install torch
```
</pt>
<tf>

```bash
pip install tensorflow
```
</tf>
</frameworkcontent>

## Pipeline

<Youtube id="tiZFewofSLM"/>

The [`pipeline`] is the easiest and fastest way to use a pretrained model for inference. You can use the [`pipeline`] out-of-the-box for many tasks across different modalities, some of which are shown in the table below:

<Tip>

For a complete list of available tasks, check out the [pipeline API reference](./main_classes/pipelines).
</Tip>

| **Task**                     | **Description**                                                                                              | **Modality**    | **Pipeline identifier**                       |
|------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------|
| Text classification          | assign a label to a given sequence of text                                                                    | NLP             | pipeline(task="sentiment-analysis")           |
| Text generation              | generate text given a prompt                                                                                  | NLP             | pipeline(task="text-generation")              |
| Summarization                | generate a summary of a sequence of text or a document                                                        | NLP             | pipeline(task="summarization")                |
| Image classification         | assign a label to an image                                                                                    | Computer vision | pipeline(task="image-classification")         |
| Image segmentation           | assign a label to each individual pixel of an image (supports semantic, panoptic, and instance segmentation)  | Computer vision | pipeline(task="image-segmentation")           |
| Object detection             | predict the bounding boxes and classes of objects in an image                                                 | Computer vision | pipeline(task="object-detection")             |
| Audio classification         | assign a label to some audio data                                                                             | Audio           | pipeline(task="audio-classification")         |
| Automatic speech recognition | transcribe speech into text                                                                                   | Audio           | pipeline(task="automatic-speech-recognition") |
| Visual question answering    | answer a question about an image, given an image and a question                                               | Multimodal      | pipeline(task="vqa")                          |
| Document question answering  | answer a question about a document, given a document and a question                                           | Multimodal      | pipeline(task="document-question-answering")  |
| Image captioning             | generate a caption for a given image                                                                          | Multimodal      | pipeline(task="image-to-text")                |

Start by creating an instance of [`pipeline`] and specifying the task you want to use it for. In this guide, you'll use the [`pipeline`] for sentiment analysis as an example:

```py
>>> from transformers import pipeline

>>> classifier = pipeline("sentiment-analysis")
```

The [`pipeline`] downloads and caches a default [pretrained model](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis. Now you can use the `classifier` on your target text:

```py
>>> classifier("We are very happy to show you the 🤗 Transformers library.")
[{'label': 'POSITIVE', 'score': 0.9998}]
```

If you have more than one input, pass your inputs as a list to the [`pipeline`] to return a list of dictionaries:

```py
>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."])
>>> for result in results:
...     print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
label: POSITIVE, with score: 0.9998
label: NEGATIVE, with score: 0.5309
```

The [`pipeline`] can also iterate over an entire dataset for any task you like. For this example, let's choose automatic speech recognition as our task:

```py
>>> import torch
>>> from transformers import pipeline

>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```

Load an audio dataset you'd like to iterate over (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) for more details).
For example, load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset:

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")  # doctest: +IGNORE_RESULT
```

You need to make sure the sampling rate of the dataset matches the sampling rate [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) was trained on:

```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))
```

The audio files are automatically loaded and resampled when calling the `"audio"` column. Extract the raw waveform arrays from the first 4 samples and pass them as a list to the pipeline:

```py
>>> result = speech_recognizer(dataset[:4]["audio"])
>>> print([d["text"] for d in result])
['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS', 'HOW DO I FURN A JOINA COUT']
```

For larger datasets where the inputs are big (as in speech or vision), you'll want to pass a generator instead of a list to load all the inputs in memory. Take a look at the [pipeline API reference](./main_classes/pipelines) for more information.

### Use another model and tokenizer in the pipeline

The [`pipeline`] can accommodate any model from the [Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use-cases. For example, if you'd like a model capable of handling French text, use the tags on the Hub to filter for an appropriate model.
The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) fine-tuned for sentiment analysis that you can use for French text:

```py
>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
```

<frameworkcontent>
<pt>
Use [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on an `AutoClass` in the next section):

```py
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</pt>
<tf>
Use [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on a `TFAutoClass` in the next section):

```py
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```
</tf>
</frameworkcontent>

Specify the model and tokenizer in the [`pipeline`], and now you can apply the `classifier` on French text:

```py
>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.")
[{'label': '5 stars', 'score': 0.7273}]
```

If you can't find a model for your use-case, you'll need to fine-tune a pretrained model on your data. Take a look at our [fine-tuning tutorial](./training) to learn how. Finally, after you've fine-tuned your pretrained model, please consider [sharing](./model_sharing) the model with the community on the Hub to democratize machine learning for everyone! 🤗

## AutoClass

<Youtube id="AhChOFRegn4"/>

Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`] you used above. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from its name or path. You only need to select the appropriate `AutoClass` for your task and its associated preprocessing class.

Let's return to the example from the previous section and see how you can use the `AutoClass` to replicate the results of the [`pipeline`].

### AutoTokenizer

A tokenizer is responsible for preprocessing text into an array of numbers as inputs to a model. There are multiple rules that govern the tokenization process, including how to split a word and at what level words should be split (learn more about tokenization in the [tokenizer summary](./tokenizer_summary)). The most important thing to remember is that you need to instantiate a tokenizer with the same model name to ensure you're using the same tokenization rules the model was pretrained with.

Load a tokenizer with [`AutoTokenizer`]:

```py
>>> from transformers import AutoTokenizer

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
```

Pass your text to the tokenizer:

```py
>>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.")
>>> print(encoding)
{'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

The tokenizer returns a dictionary containing:

* [input_ids](./glossary#input-ids): numerical representations of your tokens.
* [attention_mask](./glossary#attention-mask): indicates which tokens should be attended to.

A tokenizer can also accept a list of inputs, and pad and truncate the text to return a batch with uniform length:

<frameworkcontent>
<pt>

```py
>>> pt_batch = tokenizer(
...     ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
...     padding=True,
...     truncation=True,
...     max_length=512,
...     return_tensors="pt",
... )
```
</pt>
<tf>

```py
>>> tf_batch = tokenizer(
...     ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
...     padding=True,
...     truncation=True,
...     max_length=512,
...     return_tensors="tf",
... )
```
</tf>
</frameworkcontent>

<Tip>

Check out the [preprocess](./preprocessing) tutorial for more details about tokenization, and how to use an [`AutoImageProcessor`], [`AutoFeatureExtractor`] and [`AutoProcessor`] to preprocess image, audio, and multimodal inputs.

</Tip>

### AutoModel

<frameworkcontent>
<pt>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. For text (or sequence) classification, you should load [`AutoModelForSequenceClassification`]:

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name)
```

<Tip>

See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class.

</Tip>

Now pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`:

```py
>>> pt_outputs = pt_model(**pt_batch)
```

The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:

```py
>>> from torch import nn

>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
>>> print(pt_predictions)
tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
        [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>)
```
</pt>
<tf>
🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load a [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task.
For text (or sequence) classification, you should load [`TFAutoModelForSequenceClassification`]:

```py
>>> from transformers import TFAutoModelForSequenceClassification

>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment"
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
```

<Tip>

See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class.

</Tip>

Now pass your preprocessed batch of inputs directly to the model. You can pass the tensors as-is:

```py
>>> tf_outputs = tf_model(tf_batch)
```

The model outputs the final activations in the `logits` attribute. Apply the softmax function to the `logits` to retrieve the probabilities:

```py
>>> import tensorflow as tf

>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1)
>>> tf_predictions  # doctest: +IGNORE_RESULT
```
</tf>
</frameworkcontent>

<Tip>

All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors *before* the final activation function (like softmax) because the final activation function is often fused with the loss. Model outputs are special dataclasses, so their attributes are autocompleted in an IDE. The model outputs behave like a tuple or a dictionary (you can index with an integer, a slice or a string), in which case attributes that are None are ignored.

</Tip>

### Save a model

<frameworkcontent>
<pt>
Once your model is fine-tuned, you can save it with its tokenizer using [`PreTrainedModel.save_pretrained`]:

```py
>>> pt_save_directory = "./pt_save_pretrained"
>>> tokenizer.save_pretrained(pt_save_directory)  # doctest: +IGNORE_RESULT
>>> pt_model.save_pretrained(pt_save_directory)
```

When you are ready to use the model again, reload it with [`PreTrainedModel.from_pretrained`]:

```py
>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained")
```
</pt>
<tf>
Once your model is fine-tuned, you can save it with its tokenizer using [`TFPreTrainedModel.save_pretrained`]:

```py
>>> tf_save_directory = "./tf_save_pretrained"
>>> tokenizer.save_pretrained(tf_save_directory)  # doctest: +IGNORE_RESULT
>>> tf_model.save_pretrained(tf_save_directory)
```

When you are ready to use the model again, reload it with [`TFPreTrainedModel.from_pretrained`]:

```py
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained")
```
</tf>
</frameworkcontent>

One particularly cool 🤗 Transformers feature is the ability to save a model and reload it as either a PyTorch or TensorFlow model. The `from_pt` or `from_tf` parameter can convert the model from one framework to the other:

<frameworkcontent>
<pt>

```py
>>> from transformers import AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
```
</pt>
<tf>

```py
>>> from transformers import TFAutoModel

>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
```
</tf>
</frameworkcontent>

## Custom model builds

You can modify the model's configuration class to change how a model is built. The configuration specifies a model's attributes, such as the number of hidden layers or attention heads.

You start from scratch when you initialize a model from a custom configuration class. The model attributes are randomly initialized, and you'll need to train the model before you can use it to get meaningful results.

Start by importing [`AutoConfig`], and then load the pretrained model you want to modify. Within [`AutoConfig.from_pretrained`], you can specify the attribute you want to change, such as the number of attention heads:

```py
>>> from transformers import AutoConfig

>>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12)
```

<frameworkcontent>
<pt>
Create a model from your custom configuration with [`AutoModel.from_config`]:

```py
>>> from transformers import AutoModel

>>> my_model = AutoModel.from_config(my_config)
```
</pt>
<tf>
Create a model from your custom configuration with [`TFAutoModel.from_config`]:

```py
>>> from transformers import TFAutoModel

>>> my_model = TFAutoModel.from_config(my_config)
```
</tf>
</frameworkcontent>

Take a look at the [Create a custom architecture](./create_a_model) guide for more information about building custom configurations.

## Trainer - a PyTorch optimized training loop

All models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) so you can use them in any typical training loop. While you can write your own training loop, 🤗 Transformers provides a [`Trainer`] class for PyTorch, which contains the basic training loop and adds additional functionality for features like distributed training, mixed precision, and more.

Depending on your task, you'll typically pass the following parameters to [`Trainer`]:

1. You'll start with a [`PreTrainedModel`] or a [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module):

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

2. [`TrainingArguments`] contains the model hyperparameters you can change, like the learning rate, batch size, and the number of epochs to train for. The default values are used if you don't specify any training arguments:

```py
>>> from transformers import TrainingArguments

>>> training_args = TrainingArguments(
...     output_dir="path/to/save/folder/",
...     learning_rate=2e-5,
...     per_device_train_batch_size=8,
...     per_device_eval_batch_size=8,
...     num_train_epochs=2,
... )
```

3. Load a preprocessing class like a tokenizer, image processor, feature extractor, or processor:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```

4. Load a dataset:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("rotten_tomatoes")  # doctest: +IGNORE_RESULT
```

5. Create a function to tokenize the dataset:

```py
>>> def tokenize_dataset(dataset):
...     return tokenizer(dataset["text"])
```

Then apply it over the entire dataset with [`~datasets.Dataset.map`]:

```py
>>> dataset = dataset.map(tokenize_dataset, batched=True)
```

6. A [`DataCollatorWithPadding`] to create a batch of examples from your dataset:

```py
>>> from transformers import DataCollatorWithPadding

>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
```

Now gather all these classes in [`Trainer`]:

```py
>>> from transformers import Trainer

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=dataset["train"],
...     eval_dataset=dataset["test"],
...     processing_class=tokenizer,
...     data_collator=data_collator,
... )  # doctest: +SKIP
```

When you're ready, call [`~Trainer.train`] to start training:

```py
>>> trainer.train()  # doctest: +SKIP
```

<Tip>

For tasks - like translation or summarization - that use a sequence-to-sequence model, use the [`Seq2SeqTrainer`] and [`Seq2SeqTrainingArguments`] classes instead.

</Tip>

You can customize the training loop behavior by subclassing the methods inside [`Trainer`]. This allows you to customize features such as the loss function, optimizer, and scheduler. Take a look at the [`Trainer`] reference for which methods can be subclassed.

The other way to customize the training loop is by using [Callbacks](./main_classes/callback). You can use callbacks to integrate with other libraries and inspect the training loop to report on progress or stop the training early. Callbacks do not modify anything in the training loop itself. To customize something like the loss function, you need to subclass [`Trainer`] instead.

## Train with TensorFlow

All models are a standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) so they can be trained in TensorFlow with the [Keras](https://keras.io/) API. 🤗 Transformers provides the [`~TFPreTrainedModel.prepare_tf_dataset`] method to easily load your dataset as a `tf.data.Dataset` so you can start training right away with Keras' [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) and [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) methods.

1. You'll start with a [`TFPreTrainedModel`] or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model):

```py
>>> from transformers import TFAutoModelForSequenceClassification

>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
```

2. Load a preprocessing class like a tokenizer, image processor, feature extractor, or processor:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
```

3. Create a function to tokenize the dataset:

```py
>>> def tokenize_dataset(dataset):
...     return tokenizer(dataset["text"])  # doctest: +SKIP
```

4. Apply the tokenizer over the entire dataset with [`~datasets.Dataset.map`], and then pass the dataset and tokenizer to [`~TFPreTrainedModel.prepare_tf_dataset`]. You can also change the batch size and shuffle the dataset here if you'd like:

```py
>>> dataset = dataset.map(tokenize_dataset)  # doctest: +SKIP
>>> tf_dataset = model.prepare_tf_dataset(
...     dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer
... )  # doctest: +SKIP
```

5. When you're ready, call `compile` and `fit` to start training. Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:

```py
>>> from tensorflow.keras.optimizers import Adam

>>> model.compile(optimizer=Adam(3e-5))  # No loss argument!
>>> model.fit(tf_dataset)  # doctest: +SKIP
```

## What's next?

Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides!

<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Sharing custom models

The 🤗 Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs.

If you are writing a brand new model, it might be easier to start from scratch. In this tutorial, we will show you how to write a custom model and its configuration so it can be used within Transformers, and how to share it with the community (with the code it relies on) so that anyone can use it, even if it's not present in the 🤗 Transformers library.

We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the [timm library](https://github.com/rwightman/pytorch-image-models) into a [`PreTrainedModel`].

## Writing a custom configuration

Before we dive into the model, let's first write its configuration. The configuration of a model is an object that holds all the information needed to build the model. As we will see in the next section, the model can only take a `config` to be initialized, so we really need that object to be as complete as possible.

We will take a couple of arguments of the ResNet class that we might want to tweak. Different configurations will then give us the different types of ResNets that are possible. We then just store those arguments, after checking the validity of a few of them:

```python
from transformers import PretrainedConfig
from typing import List


class ResnetConfig(PretrainedConfig):
    model_type = "resnet"

    def __init__(
        self,
        block_type="bottleneck",
        layers: List[int] = [3, 4, 6, 3],
        num_classes: int = 1000,
        input_channels: int = 3,
        cardinality: int = 1,
        base_width: int = 64,
        stem_width: int = 64,
        stem_type: str = "",
        avg_down: bool = False,
        **kwargs,
    ):
        if block_type not in ["basic", "bottleneck"]:
            raise ValueError(f"`block_type` must be 'basic' or 'bottleneck', got {block_type}.")
        if stem_type not in ["", "deep", "deep-tiered"]:
            raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")

        self.block_type = block_type
        self.layers = layers
        self.num_classes = num_classes
        self.input_channels = input_channels
        self.cardinality = cardinality
        self.base_width = base_width
        self.stem_width = stem_width
        self.stem_type = stem_type
        self.avg_down = avg_down
        super().__init__(**kwargs)
```

The three important things to remember when writing your own configuration are:
- you have to inherit from `PretrainedConfig`,
- the `__init__` of your `PretrainedConfig` must accept any kwargs,
- those `kwargs` need to be passed along to the superclass `__init__`.

The inheritance ensures you get all the functionality from the 🤗 Transformers library, while the other two constraints come from the fact that a `PretrainedConfig` has more fields than the ones you are setting. When reloading a config with the `from_pretrained` method, those fields need to be accepted by your config and then sent to the superclass.

Defining a `model_type` for your configuration (here `model_type="resnet"`) is not mandatory, unless you want to register your model with the auto classes (see the last section).

With this done, you can easily create and save your configuration like you would with any other model config of the library. Here is how to create and save a resnet50d config:

```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d_config.save_pretrained("custom-resnet")
```

This will save a file named `config.json` inside the folder `custom-resnet`. You can then reload your config with the `from_pretrained` method:

```py
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
```

You can also use any other method of the [`PretrainedConfig`] class, like [`~PretrainedConfig.push_to_hub`], to directly upload your config to the Hub.

## Writing a custom model

Now that we have our ResNet configuration, we can go on writing the model. We will actually write two: one that extracts the hidden features from a batch of images (like [`BertModel`]) and one that is suitable for image classification (like [`BertForSequenceClassification`]).

As mentioned before, we'll only write a loose wrapper of the model to keep the example simple. The only thing we need to do before writing this class is a map between the block types and the actual block classes. Then the model is defined from the configuration by passing everything to the `ResNet` class:

```py
from transformers import PreTrainedModel
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
from .configuration_resnet import ResnetConfig


BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}


class ResnetModel(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor):
        return self.model.forward_features(tensor)
```

For the model that will classify images, we just change the forward method:

```py
import torch


class ResnetModelForImageClassification(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor, labels=None):
        logits = self.model(tensor)
        if labels is not None:
            loss = torch.nn.functional.cross_entropy(logits, labels)
            return {"loss": loss, "logits": logits}
        return {"logits": logits}
```

In both cases, notice how we inherit from `PreTrainedModel` and call the superclass initialization with the `config` (a bit like when you write a regular `torch.nn.Module`). The line that sets `config_class` is not mandatory, unless you want to register your model with the auto classes (see the last section).

<Tip>

If your model is very similar to a model inside the library, you can re-use the same configuration as that model.

</Tip>

You can have your model return anything you want, but returning a dictionary like we did for `ResnetModelForImageClassification`, with the loss included when labels are passed, will make your model directly usable inside the [`Trainer`] class. Using another output format is fine, as long as you are planning on using your own training loop or another library for training.

Now that we have our model class, let's create one:

```py
resnet50d = ResnetModelForImageClassification(resnet50d_config)
```

Again, you can use any of the methods of [`PreTrainedModel`], like [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`]. We will use the second in the next section, and see how to push the model weights along with the code for our model. But first, let's load some pretrained weights inside our model.

In your own use case, you will probably be training your custom model on your own data. To go fast for this tutorial, we will use the pretrained version of resnet50d. Since our model is just a wrapper around it, transferring those weights is easy:

```py
import timm

pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```

Now let's see how to make sure that when we do [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`], the code of the model is saved.

## Sending the code to the Hub

<Tip warning={true}>

This API is experimental and may have some slight breaking changes in future releases.

</Tip>

First, make sure your model is fully defined in a `.py` file. It can rely on relative imports to some other files as long as all the files are in the same directory (we don't support submodules for this feature yet). For our example, we'll define a `modeling_resnet.py` file and a `configuration_resnet.py` file in a folder of the current working directory named `resnet_model`. The configuration file contains the code for `ResnetConfig` and the modeling file contains the code for `ResnetModel` and `ResnetModelForImageClassification`.

```
.
└── resnet_model
    ├── __init__.py
    ├── configuration_resnet.py
    └── modeling_resnet.py
```

The `__init__.py` can be empty; it's just there so that Python detects `resnet_model` can be used as a module.

<Tip warning={true}>

If copying modeling files from the library, you will need to replace all the relative imports at the top of the file with imports from the `transformers` package.

</Tip>

Note that you can re-use (or subclass) an existing configuration/model.

To share your model with the community, follow these steps: first import the ResNet model and config from the newly created files:

```py
from resnet_model.configuration_resnet import ResnetConfig
from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification
```

Then you have to tell the library that you want to copy the code files of those objects when using the `save_pretrained` method and properly register them with a given Auto class (especially for models). Just run:

```py
ResnetConfig.register_for_auto_class()
ResnetModel.register_for_auto_class("AutoModel")
ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")
```

Note that there is no need to specify an auto class for the configuration (there is only one auto class for them, [`AutoConfig`]), but it's different for models. Your custom model could be suitable for many different tasks, so you have to specify which of the auto classes is the correct one for your model.

Next, let's create the config and models as we did before:

```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d = ResnetModelForImageClassification(resnet50d_config)

pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```

Now to send the model to the Hub, make sure you are logged in. Either run in your terminal:

```bash
huggingface-cli login
```

or from a notebook:

```py
from huggingface_hub import notebook_login

notebook_login()
```

You can then push to your own namespace (or an organization you are a member of) like this:

```py
resnet50d.push_to_hub("custom-resnet50d")
```

On top of the modeling weights and the configuration in json format, this also copied the modeling and configuration `.py` files into the folder `custom-resnet50d` and uploaded the result to the Hub. You can check the result in this [model repo](https://huggingface.co/sgugger/custom-resnet50d).

See the [sharing tutorial](model_sharing) for more information on the push-to-Hub method.

## Using a model with custom code

You can use any configuration, model, or tokenizer with custom code files in its repository with the auto classes and the `from_pretrained` method. All files and code uploaded to the Hub are scanned for malware (refer to the [Hub security](https://huggingface.co/docs/hub/security#malware-scanning) documentation for more information), but you should still review the model code and author to avoid executing malicious code on your machine. Set `trust_remote_code=True` to use a model with custom code:

```py
from transformers import AutoModelForImageClassification

model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)
```

It is also strongly encouraged to pass a commit hash as the `revision` argument to make sure the author of the model did not update the code with some malicious new lines (unless you fully trust the author):

```py
commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292"
model = AutoModelForImageClassification.from_pretrained(
    "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash
)
```

Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit hash of any commit.

## Registering a model with custom code to the auto classes

If you are writing a library that extends 🤗 Transformers, you may want to extend the auto classes to include your own model. This is different from pushing the code to the Hub in the sense that users will need to import your library to get the custom models (contrary to automatically downloading the model code from the Hub).

As long as your config has a `model_type` attribute that is different from existing model types, and your model classes have the right `config_class` attributes, you can just add them to the auto classes like this:

```py
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification

AutoConfig.register("resnet", ResnetConfig)
AutoModel.register(ResnetConfig, ResnetModel)
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
```

Note that the first argument used when registering your custom config to [`AutoConfig`] needs to match the `model_type` of your custom config, and the first argument used when registering your custom models to any auto model class needs to match the `config_class` of those models.

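Once registered, the auto classes resolve your custom model type just like a built-in one. Here is a minimal sketch of what the registration enables; it assumes the classes and `register` calls above have already been executed in the current session, and `"my-custom-resnet50d"` is a hypothetical local directory used purely for illustration:

```py
# Save a custom model locally, then reload it through the auto classes.
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d = ResnetModelForImageClassification(resnet50d_config)
resnet50d.save_pretrained("my-custom-resnet50d")

# Because "resnet" is registered with AutoConfig and the model classes with the
# auto model classes, from_pretrained now resolves to your custom classes:
config = AutoConfig.from_pretrained("my-custom-resnet50d")  # a ResnetConfig
model = AutoModelForImageClassification.from_pretrained("my-custom-resnet50d")  # a ResnetModelForImageClassification
```
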
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Processors

Processors can mean two different things in the Transformers library:
- the objects that pre-process inputs for multi-modal models such as [Wav2Vec2](../model_doc/wav2vec2) (speech and text) or [CLIP](../model_doc/clip) (text and vision)
- deprecated objects that were used in older versions of the library to preprocess data for GLUE or SQUAD.

## Multi-modal processors

Any multi-modal model requires an object to encode or decode data that groups several modalities (among text, vision and audio). This is handled by objects called processors, which group two or more processing objects such as tokenizers (for the text modality), image processors (for vision) and feature extractors (for audio). (See the usage sketch at the end of this page.)

Those processors inherit from the following base class that implements the saving and loading functionality:

[[autodoc]] ProcessorMixin

## Deprecated processors

All processors follow the same architecture, which is that of the [`~data.processors.utils.DataProcessor`]. The processor returns a list of [`~data.processors.utils.InputExample`]. These [`~data.processors.utils.InputExample`] can be converted to [`~data.processors.utils.InputFeatures`] in order to be fed to the model.

[[autodoc]] data.processors.utils.DataProcessor

[[autodoc]] data.processors.utils.InputExample

[[autodoc]] data.processors.utils.InputFeatures

## GLUE

[General Language Understanding Evaluation (GLUE)](https://gluebenchmark.com/) is a benchmark that evaluates the performance of models across a diverse set of existing natural language understanding tasks. It was released together with the paper [GLUE: A multi-task benchmark and analysis platform for natural language understanding](https://openreview.net/pdf?id=rJ4km2R5t7).

This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI.

Those processors are:
- [`~data.processors.utils.MrpcProcessor`]
- [`~data.processors.utils.MnliProcessor`]
- [`~data.processors.utils.MnliMismatchedProcessor`]
- [`~data.processors.utils.Sst2Processor`]
- [`~data.processors.utils.StsbProcessor`]
- [`~data.processors.utils.QqpProcessor`]
- [`~data.processors.utils.QnliProcessor`]
- [`~data.processors.utils.RteProcessor`]
- [`~data.processors.utils.WnliProcessor`]

Additionally, the following method can be used to load values from a data file and convert them to a list of [`~data.processors.utils.InputExample`].

[[autodoc]] data.processors.glue.glue_convert_examples_to_features

## XNLI

[The Cross-Lingual NLI Corpus (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) is a benchmark that evaluates the quality of cross-lingual text representations. XNLI is a crowd-sourced dataset based on [*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/): pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource languages such as English and low-resource languages such as Swahili).

It was released together with the paper [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053).

This library hosts the processor to load the XNLI data:
- [`~data.processors.utils.XnliProcessor`]

Please note that since the gold labels are available on the test set, evaluation is performed on the test set.

An example of using these processors is given in the [run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_xnli.py) script.

## SQuAD

[The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250). The second version (v2.0) was released alongside the paper [Know What You Don't Know: Unanswerable Questions for SQuAD](https://arxiv.org/abs/1806.03822).

This library hosts a processor for each of the two versions:

### Processors

Those processors are:
- [`~data.processors.utils.SquadV1Processor`]
- [`~data.processors.utils.SquadV2Processor`]

They both inherit from the abstract class [`~data.processors.utils.SquadProcessor`].

[[autodoc]] data.processors.squad.SquadProcessor
    - all

Additionally, the following method can be used to convert SQuAD examples into [`~data.processors.utils.SquadFeatures`] that can be used as model inputs.

[[autodoc]] data.processors.squad.squad_convert_examples_to_features

These processors, as well as the aforementioned method, can be used with files containing the data as well as with the *tensorflow_datasets* package. Examples are given below.

### Example usage

Here is an example using the processors as well as the conversion method using data files:

```python
# Loading a V2 processor
processor = SquadV2Processor()
examples = processor.get_dev_examples(squad_v2_data_dir)

# Loading a V1 processor
processor = SquadV1Processor()
examples = processor.get_dev_examples(squad_v1_data_dir)

features = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=max_seq_length,
    doc_stride=args.doc_stride,
    max_query_length=max_query_length,
    is_training=not evaluate,
)
```

Using *tensorflow_datasets* is as easy as using a data file:

```python
# tensorflow_datasets only handles Squad V1.
tfds_examples = tfds.load("squad")
examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate)

features = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=max_seq_length,
    doc_stride=args.doc_stride,
    max_query_length=max_query_length,
    is_training=not evaluate,
)
```

Another example of using these processors is given in the [run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) script.

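To complement the deprecated processors above, here is the usage sketch promised in the multi-modal processors section. It assumes a Wav2Vec2 checkpoint, and the dummy waveform is a placeholder chosen purely for illustration:

```python
import numpy as np
from transformers import AutoProcessor

# AutoProcessor picks the right processor class for the checkpoint
# (here, a processor wrapping a feature extractor and a tokenizer).
processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

# Encode the audio modality: a 1-second dummy waveform sampled at 16 kHz.
audio = np.zeros(16000, dtype=np.float32)
inputs = processor(audio, sampling_rate=16000, return_tensors="pt")

# The same object also wraps the tokenizer, so it can decode model predictions,
# e.g. processor.batch_decode(predicted_ids) after a forward pass.
```
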
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Preprocess

[[open-in-colab]]

Before you can train a model on a dataset, the data needs to be preprocessed into the expected model input format. Whether your data is text, images, or audio, it needs to be converted and assembled into batches of tensors. 🤗 Transformers provides a set of preprocessing classes to help prepare your data for the model. In this tutorial, you'll learn that for:

* Text, use a [Tokenizer](./main_classes/tokenizer) to convert text into a sequence of tokens, create a numerical representation of the tokens, and assemble them into tensors.
* Speech and audio, use a [Feature extractor](./main_classes/feature_extractor) to extract sequential features from audio waveforms and convert them into tensors.
* Image inputs, use an [ImageProcessor](./main_classes/image) to convert images into tensors.
* Multimodal inputs, use a [Processor](./main_classes/processors) to combine a tokenizer with an image processor or a feature extractor.

<Tip>

`AutoProcessor` **always** works and automatically chooses the correct class for the model you're using, whether you're using a tokenizer, image processor, feature extractor or processor.

</Tip>

Before you begin, install 🤗 Datasets so you can load some datasets to experiment with:

```bash
pip install datasets
```

## Natural Language Processing

<Youtube id="Yffk5aydLzg"/>

The main tool for preprocessing textual data is a [tokenizer](main_classes/tokenizer). A tokenizer splits text into *tokens* according to a set of rules. The tokens are converted into numbers and then tensors, which become the model inputs. Any additional inputs required by the model are added by the tokenizer.

<Tip>

If you plan on using a pretrained model, it's important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and uses the same corresponding tokens-to-index mapping (usually referred to as the *vocab*) as during pretraining.

</Tip>

Get started by loading a pretrained tokenizer with the [`AutoTokenizer.from_pretrained`] method. This downloads the *vocab* the model was pretrained with:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
```

Then pass your text to the tokenizer:

```py
>>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.")
>>> print(encoded_input)
{'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

The tokenizer returns a dictionary with three important items:

* [input_ids](glossary#input-ids) are the indices corresponding to each token in the sentence.
* [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not.
* [token_type_ids](glossary#token-type-ids) identifies which sequence a token belongs to when there is more than one sequence.

Return your input by decoding the `input_ids`:

```py
>>> tokenizer.decode(encoded_input["input_ids"])
'[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]'
```

As you can see, the tokenizer added two special tokens - `CLS` and `SEP` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer automatically adds them for you.

If there are several sentences you want to preprocess, pass them as a list to the tokenizer:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
] >>> encoded_inputs = tokenizer(batch_sentences) >>> print(encoded_inputs) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]} ``` ### 填充 句子的长度并不总是相同,这可能会成为一个问题,因为模型输入的张量需要具有统一的形状。填充是一种策略,通过在较短的句子中添加一个特殊的`padding token`,以确保张量是矩形的。 将 `padding` 参数设置为 `True`,以使批次中较短的序列填充到与最长序列相匹配的长度: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` 第一句和第三句因为较短,通过`0`进行填充,。 ### 截断 另一方面,有时候一个序列可能对模型来说太长了。在这种情况下,您需要将序列截断为更短的长度。 将 `truncation` 参数设置为 `True`,以将序列截断为模型接受的最大长度: ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) >>> print(encoded_input) {'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` <Tip> 查看[填充和截断](./pad_truncation)概念指南,了解更多有关填充和截断参数的信息。 </Tip> ### 构建张量 最后,`tokenizer`可以返回实际输入到模型的张量。 将 `return_tensors` 参数设置为 `pt`(对于PyTorch)或 `tf`(对于TensorFlow): <frameworkcontent> <pt> ```py >>> batch_sentences = [ ... "But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") >>> print(encoded_input) {'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} ``` </pt> <tf> ```py >>> batch_sentences = [ ... 
"But what about second breakfast?", ... "Don't think he knows about second breakfast, Pip.", ... "What about elevensies?", ... ] >>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") >>> print(encoded_input) {'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy= array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>} ``` </tf> </frameworkcontent> ## 音频 对于音频任务,您需要[feature extractor](main_classes/feature_extractor)来准备您的数据集以供模型使用。`feature extractor`旨在从原始音频数据中提取特征,并将它们转换为张量。 加载[MInDS-14](https://huggingface.co/datasets/PolyAI/minds14)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets教程](https://huggingface.co/docs/datasets/load_hub))以了解如何在音频数据集中使用`feature extractor`: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` 访问 `audio` 列的第一个元素以查看输入。调用 `audio` 列会自动加载和重新采样音频文件: ```py >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` 这会返回三个对象: * `array` 是加载的语音信号 - 并在必要时重新采为`1D array`。 * `path` 指向音频文件的位置。 * `sampling_rate` 是每秒测量的语音信号数据点数量。 对于本教程,您将使用[Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base)模型。查看模型卡片,您将了解到Wav2Vec2是在16kHz采样的语音音频数据上预训练的。重要的是,您的音频数据的采样率要与用于预训练模型的数据集的采样率匹配。如果您的数据的采样率不同,那么您需要对数据进行重新采样。 1. 使用🤗 Datasets的[`~datasets.Dataset.cast_column`]方法将采样率提升到16kHz: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` 2. 
再次调用 `audio` 列以重新采样音频文件: ```py >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` 接下来,加载一个`feature extractor`以对输入进行标准化和填充。当填充文本数据时,会为较短的序列添加 `0`。相同的理念适用于音频数据。`feature extractor`添加 `0` - 被解释为静音 - 到`array` 。 使用 [`AutoFeatureExtractor.from_pretrained`] 加载`feature extractor`: ```py >>> from transformers import AutoFeatureExtractor >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` 将音频 `array` 传递给`feature extractor`。我们还建议在`feature extractor`中添加 `sampling_rate` 参数,以更好地调试可能发生的静音错误: ```py >>> audio_input = [dataset[0]["audio"]["array"]] >>> feature_extractor(audio_input, sampling_rate=16000) {'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` 就像`tokenizer`一样,您可以应用填充或截断来处理批次中的可变序列。请查看这两个音频样本的序列长度: ```py >>> dataset[0]["audio"]["array"].shape (173398,) >>> dataset[1]["audio"]["array"].shape (106496,) ``` 创建一个函数来预处理数据集,以使音频样本具有相同的长度。通过指定最大样本长度,`feature extractor`将填充或截断序列以使其匹配: ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs ``` 将`preprocess_function`应用于数据集中的前几个示例: ```py >>> processed_dataset = preprocess_function(dataset[:5]) ``` 现在样本长度是相同的,并且与指定的最大长度匹配。您现在可以将经过处理的数据集传递给模型了! ```py >>> processed_dataset["input_values"][0].shape (100000,) >>> processed_dataset["input_values"][1].shape (100000,) ``` ## 计算机视觉 对于计算机视觉任务,您需要一个[ image processor](main_classes/image_processor)来准备数据集以供模型使用。图像预处理包括多个步骤将图像转换为模型期望输入的格式。这些步骤包括但不限于调整大小、标准化、颜色通道校正以及将图像转换为张量。 <Tip> 图像预处理通常遵循某种形式的图像增强。图像预处理和图像增强都会改变图像数据,但它们有不同的目的: * 图像增强可以帮助防止过拟合并增加模型的鲁棒性。您可以在数据增强方面充分发挥创造性 - 调整亮度和颜色、裁剪、旋转、调整大小、缩放等。但要注意不要改变图像的含义。 * 图像预处理确保图像与模型预期的输入格式匹配。在微调计算机视觉模型时,必须对图像进行与模型训练时相同的预处理。 您可以使用任何您喜欢的图像增强库。对于图像预处理,请使用与模型相关联的`ImageProcessor`。 </Tip> 加载[food101](https://huggingface.co/datasets/food101)数据集(有关如何加载数据集的更多详细信息,请参阅🤗 [Datasets教程](https://huggingface.co/docs/datasets/load_hub))以了解如何在计算机视觉数据集中使用图像处理器: <Tip> 因为数据集相当大,请使用🤗 Datasets的`split`参数加载训练集中的少量样本! 
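To prepare the whole dataset rather than a slice of it, the same function can be applied with 🤗 Datasets' [`~datasets.Dataset.map`]. This is a small sketch beyond the original example; with `batched=True`, `preprocess_function` receives batches shaped like the `dataset[:5]` slice above:

```py
>>> encoded_dataset = dataset.map(preprocess_function, batched=True, remove_columns=dataset.column_names)
```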
## Computer vision

For computer vision tasks, you'll need an [image processor](main_classes/image_processor) to prepare your dataset for the model. Image preprocessing consists of several steps that convert images into the input expected by the model. These steps include but are not limited to resizing, normalizing, color channel correction, and converting images to tensors.

<Tip>

Image preprocessing often follows some form of image augmentation. Both image preprocessing and image augmentation transform image data, but they serve different purposes:

* Image augmentation helps prevent overfitting and increases the robustness of the model. You can get creative in how you augment your data - adjust brightness and colors, crop, rotate, resize, zoom, etc. However, be mindful not to change the meaning of the images with your augmentations.
* Image preprocessing guarantees that the images match the model's expected input format. When fine-tuning a computer vision model, images must be preprocessed exactly as when the model was initially trained.

You can use any library you like for image augmentation. For image preprocessing, use the `ImageProcessor` associated with the model.

</Tip>

Load the [food101](https://huggingface.co/datasets/food101) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use an image processor with computer vision datasets:

<Tip>

Because the dataset is quite large, use 🤗 Datasets' `split` parameter to only load a small sample from the training split!

</Tip>

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("food101", split="train[:100]")
```

Next, take a look at the image with 🤗 Datasets' [`Image`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) feature:

```py
>>> dataset[0]["image"]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png"/>
</div>

Load the `image processor` with [`AutoImageProcessor.from_pretrained`]:

```py
>>> from transformers import AutoImageProcessor

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
```

First, let's add some image augmentation. You can use any library you prefer, but in this tutorial we'll use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module. If you're interested in using another data augmentation library, see the examples in the [Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) or [Kornia notebooks](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb).

1. Here we use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain together the [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) transforms. Note that for resizing, we can get the image size requirements from the `image_processor`. For some models, an exact height and width are expected, while for others only the `shortest_edge` is defined.

```py
>>> from torchvision.transforms import RandomResizedCrop, ColorJitter, Compose

>>> size = (
...     image_processor.size["shortest_edge"]
...     if "shortest_edge" in image_processor.size
...     else (image_processor.size["height"], image_processor.size["width"])
... )

>>> _transforms = Compose([RandomResizedCrop(size), ColorJitter(brightness=0.5, hue=0.5)])
```

2. The model accepts [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as its input. The `ImageProcessor` can take care of normalizing the images and generating the appropriate tensors. Create a function that combines image augmentation and image preprocessing for a batch of images and generates `pixel_values`:

```py
>>> def transforms(examples):
...     images = [_transforms(img.convert("RGB")) for img in examples["image"]]
...     examples["pixel_values"] = image_processor(images, do_resize=False, return_tensors="pt")["pixel_values"]
...     return examples
```

<Tip>

In the example above, we set `do_resize=False` because we have already resized the images in the image augmentation transformation, using the `size` attribute of the appropriate `image_processor`. If you do not resize images during image augmentation, leave this parameter out. By default, `ImageProcessor` will handle the resizing.

If you wish to normalize images as part of the augmentation transformation, use the `image_processor.image_mean` and `image_processor.image_std` values.

</Tip>

3. Then use 🤗 Datasets' [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform) to apply the transforms on the fly:

```py
>>> dataset.set_transform(transforms)
```

4. Now when you access the image, you'll notice the `image processor` has added `pixel_values`. You can pass your processed dataset to the model now!

```py
>>> dataset[0].keys()
```

Here is what the image looks like after the transforms are applied. The image has been randomly cropped and its color properties have changed.

```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt

>>> img = dataset[0]["pixel_values"]
>>> plt.imshow(img.permute(1, 2, 0))
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png"/>
</div>

<Tip>

For tasks like object detection, semantic segmentation, instance segmentation, and panoptic segmentation, the `ImageProcessor` offers post-processing methods. These methods convert the model's raw outputs into meaningful predictions such as bounding boxes or segmentation maps.

</Tip>

### Pad

In some cases, for instance when fine-tuning [DETR](./model_doc/detr), the model applies scale augmentation at training time. This may cause the images in a batch to have different sizes. You can use [`DetrImageProcessor.pad`] in a custom `collate_fn` to batch the images together.

```py
>>> def collate_fn(batch):
...     pixel_values = [item["pixel_values"] for item in batch]
...     encoding = image_processor.pad(pixel_values, return_tensors="pt")
...     labels = [item["labels"] for item in batch]
...     batch = {}
...     batch["pixel_values"] = encoding["pixel_values"]
...     batch["pixel_mask"] = encoding["pixel_mask"]
...     batch["labels"] = labels
...     return batch
```

## Multimodal

For tasks involving multimodal inputs, you'll need a [processor](main_classes/processors) to prepare your dataset for the model. A `processor` couples two processing objects together - for example, a `tokenizer` and a `feature extractor`.

Load the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a `processor` for automatic speech recognition (ASR):

```py
>>> from datasets import load_dataset

>>> lj_speech = load_dataset("lj_speech", split="train")
```

For ASR, you're mainly focused on `audio` and `text`, so you can remove the other columns:

```py
>>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"])
```

Now take a look at the `audio` and `text` columns:

```py
>>> lj_speech[0]["audio"]
{'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ...,
         7.3242188e-04,  2.1362305e-04,  6.1035156e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav',
 'sampling_rate': 22050}

>>> lj_speech[0]["text"]
'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition'
```

Remember that you should always [resample](preprocessing#audio) your audio dataset's sampling rate to match the sampling rate of the dataset used to pretrain the model!

```py
>>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))
```

Load a `processor` with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
```

1. Create a function to process the audio data contained in `array` to `input_values`, and to tokenize `text` to `labels`. These are the model inputs:

```py
>>> def prepare_dataset(example):
...     audio = example["audio"]
...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))
...     return example
```

2. Apply the `prepare_dataset` function to a sample:

```py
>>> prepare_dataset(lj_speech[0])
```

The `processor` has now added `input_values` and `labels`, and the sampling rate has also been correctly downsampled to 16kHz. You can pass your processed dataset to the model now!
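To process the whole dataset instead of a single sample, apply `prepare_dataset` with [`~datasets.Dataset.map`]. This is a small sketch beyond the original example, dropping the now-redundant raw columns afterwards:

```py
>>> lj_speech = lj_speech.map(prepare_dataset, remove_columns=["audio", "text"])
```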
transformers/docs/source/zh/preprocessing.md/0
{ "file_path": "transformers/docs/source/zh/preprocessing.md", "repo_id": "transformers", "token_count": 12751 }
55
# Image Captioning (vision-encoder-text-decoder model) training example

The following example showcases how to finetune a vision-encoder-text-decoder model for image captioning
using the JAX/Flax backend, leveraging the 🤗 Transformers library's
[FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/vision-encoder-decoder#transformers.FlaxVisionEncoderDecoderModel).

JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU.
Models written in JAX/Flax are **immutable** and updated in a purely functional way, which enables simple and efficient model parallelism.

`run_image_captioning_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets
library or use your own files (jsonlines or csv), then fine-tune one of the architectures above on it.

For custom datasets in `jsonlines` format please see https://huggingface.co/docs/datasets/loading_datasets#json-files; you will also find examples of these below.

### Download COCO dataset (2017)

This example uses the COCO dataset (2017) through a custom dataset script, which requires users to manually download the
COCO dataset before training.

```bash
mkdir data
cd data
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/zips/test2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
wget http://images.cocodataset.org/annotations/image_info_test2017.zip
cd ..
```

### Create a model from a vision encoder model and a text decoder model

Next, we create a [FlaxVisionEncoderDecoderModel](https://huggingface.co/docs/transformers/model_doc/visionencoderdecoder#transformers.FlaxVisionEncoderDecoderModel) instance from a pre-trained vision encoder ([ViT](https://huggingface.co/docs/transformers/model_doc/vit#transformers.FlaxViTModel)) and a pre-trained text decoder ([GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2#transformers.FlaxGPT2Model)):

```bash
python3 create_model_from_encoder_decoder_models.py \
    --output_dir model \
    --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
    --decoder_model_name_or_path openai-community/gpt2
```

### Train the model

Finally, we can run the example script to train the model:

```bash
python3 run_image_captioning_flax.py \
    --output_dir ./image-captioning-training-results \
    --model_name_or_path model \
    --dataset_name ydshieh/coco_dataset_script \
    --dataset_config_name=2017 \
    --data_dir $PWD/data \
    --image_column image_path \
    --caption_column caption \
    --do_train --do_eval --predict_with_generate \
    --num_train_epochs 1 \
    --eval_steps 500 \
    --learning_rate 3e-5 --warmup_steps 0 \
    --per_device_train_batch_size 32 \
    --per_device_eval_batch_size 32 \
    --overwrite_output_dir \
    --max_target_length 32 \
    --num_beams 8 \
    --preprocessing_num_workers 16 \
    --logging_steps 10 \
    --block_size 16384 \
    --push_to_hub
```

This should finish in about 1h30 on Cloud TPU, with a validation loss and ROUGE2 score of 2.0153 and 14.64 respectively
after 1 epoch. Training statistics can be accessed on [Models](https://huggingface.co/ydshieh/image-captioning-training-results/tensorboard).
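Once training has finished, the checkpoint can be used to generate captions. The following is a minimal inference sketch and is not part of the example scripts; `./image-captioning-training-results` stands in for wherever the fine-tuned model, tokenizer, and image processor were saved, and the COCO image URL is only an example input:

```python
import requests
from PIL import Image

from transformers import AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel

model_dir = "./image-captioning-training-results"  # placeholder: path or Hub id of the trained checkpoint
model = FlaxVisionEncoderDecoderModel.from_pretrained(model_dir)
image_processor = AutoImageProcessor.from_pretrained(model_dir)
tokenizer = AutoTokenizer.from_pretrained(model_dir)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Flax models take NumPy arrays; generate() returns an output object with a `sequences` field
pixel_values = image_processor(images=image, return_tensors="np").pixel_values
outputs = model.generate(pixel_values, max_length=32, num_beams=8)
caption = tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0]
print(caption)
```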
transformers/examples/flax/image-captioning/README.md/0
{ "file_path": "transformers/examples/flax/image-captioning/README.md", "repo_id": "transformers", "token_count": 1084 }
56
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# 🤗 Benchmark results

Here, you can find a list of the different benchmark results created by the community.

If you would like to list benchmark results on your favorite models of the [model hub](https://huggingface.co/models) here, please open a Pull Request and add it below.

| Benchmark description | Results | Environment info | Author |
|:----------|:-------------|:-------------|------:|
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` | [memory](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_memory.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Patrick von Platen](https://github.com/patrickvonplaten) |
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` | [time](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_time.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Patrick von Platen](https://github.com/patrickvonplaten) |
transformers/examples/legacy/benchmarking/README.md/0
{ "file_path": "transformers/examples/legacy/benchmarking/README.md", "repo_id": "transformers", "token_count": 499 }
57
#!/usr/bin/env python
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("almanach/camembert-base")
model = CamembertForMaskedLM.from_pretrained("almanach/camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
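# A higher-level alternative (a sketch, not part of the original script): the same
# top-k mask filling can be done with the `fill-mask` pipeline.
from transformers import pipeline

camembert_fill_mask = pipeline("fill-mask", model="almanach/camembert-base", top_k=3)
print(camembert_fill_mask(masked_input))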
transformers/examples/legacy/run_camembert.py/0
{ "file_path": "transformers/examples/legacy/run_camembert.py", "repo_id": "transformers", "token_count": 888 }
58
# coding=utf-8
# Copyright 2020 Huggingface
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu


filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
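# Standalone usage sketch (not part of the test suite): translate a single sentence with
# one of the WMT19 checkpoints to sanity-check the flow the test above exercises.
# Under pytest this block does not run, since the module is only imported.
if __name__ == "__main__":
    mname = "facebook/wmt19-en-de"
    tokenizer = FSMTTokenizer.from_pretrained(mname)
    model = FSMTForConditionalGeneration.from_pretrained(mname)
    batch = tokenizer(["Machine learning is great, isn't it?"], return_tensors="pt")
    outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))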
transformers/examples/legacy/seq2seq/old_test_fsmt_bleu_score.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/old_test_fsmt_bleu_score.py", "repo_id": "transformers", "token_count": 997 }
59
#!/usr/bin/env python
import io
import json
import subprocess


pairs = [
    ["en", "ru"],
    ["ru", "en"],
    ["en", "de"],
    ["de", "en"],
]

n_objs = 8


def get_all_data(pairs, n_objs):
    text = {}
    for src, tgt in pairs:
        pair = f"{src}-{tgt}"
        cmd = f"sacrebleu -t wmt19 -l {pair} --echo src".split()
        src_lines = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode("utf-8").splitlines()
        cmd = f"sacrebleu -t wmt19 -l {pair} --echo ref".split()
        tgt_lines = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode("utf-8").splitlines()
        text[pair] = {"src": src_lines[:n_objs], "tgt": tgt_lines[:n_objs]}
    return text


text = get_all_data(pairs, n_objs)
filename = "./fsmt_val_data.json"
with io.open(filename, "w", encoding="utf-8") as f:
    # json.dump returns None, so there is nothing useful to assign here
    json.dump(text, f, indent=2, ensure_ascii=False)
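# Sanity-check sketch (not part of the original script): read the file back and
# verify that each pair has `n_objs` aligned source/target lines.
with io.open(filename, "r", encoding="utf-8") as f:
    reloaded = json.load(f)
for pair, sides in reloaded.items():
    assert len(sides["src"]) == len(sides["tgt"]) == n_objs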
transformers/examples/legacy/seq2seq/test_data/fsmt/build-eval-data.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/test_data/fsmt/build-eval-data.py", "repo_id": "transformers", "token_count": 410 }
60
## Token classification

Based on the scripts [`run_ner.py`](https://github.com/huggingface/transformers/blob/main/examples/legacy/token-classification/run_ner.py).

The following examples are covered in this section:

* NER on the GermEval 2014 (German NER) dataset
* Emerging and Rare Entities task: WNUT’17 (English NER) dataset

Details and results for the fine-tuning are provided by @stefan-it.

### GermEval 2014 (German NER) dataset

#### Data (Download and pre-processing steps)

Data can be obtained from the [GermEval 2014](https://sites.google.com/site/germeval2014ner/data) shared task page.

Here are the commands for downloading and pre-processing the train, dev and test datasets. The original data format has four (tab-separated) columns; in a pre-processing step only the two relevant columns (token and outer span NER annotation) are extracted:

```bash
curl -L 'https://drive.google.com/uc?export=download&id=1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > train.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > dev.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1u9mb7kNJHWQCWyweMDRMuTFoOHOfeBTH' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > test.txt.tmp
```

The GermEval 2014 dataset contains some strange "control character" tokens like `'\x96', '\u200e', '\x95', '\xad' or '\x80'`.
One problem with these tokens is that `BertTokenizer` returns an empty token for them, resulting in misaligned `InputExample`s.
The `preprocess.py` script located in the `scripts` folder (a) filters out these tokens and (b) splits longer sentences into smaller ones (once the max. subtoken length is reached); a sketch of these two steps is shown at the end of this subsection.

Let's define some variables that we need for further pre-processing steps and training the model:

```bash
export MAX_LENGTH=128
export BERT_MODEL=google-bert/bert-base-multilingual-cased
```

Run the pre-processing script on training, dev and test datasets:

```bash
python3 scripts/preprocess.py train.txt.tmp $BERT_MODEL $MAX_LENGTH > train.txt
python3 scripts/preprocess.py dev.txt.tmp $BERT_MODEL $MAX_LENGTH > dev.txt
python3 scripts/preprocess.py test.txt.tmp $BERT_MODEL $MAX_LENGTH > test.txt
```

The GermEval 2014 dataset has many more labels than the CoNLL-2002/2003 datasets, so a dedicated set of labels must be used:

```bash
cat train.txt dev.txt test.txt | cut -d " " -f 2 | grep -v "^$"| sort | uniq > labels.txt
```
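The `preprocess.py` script itself is not reproduced here; the sketch below illustrates the two steps it performs (filtering tokens that tokenize to nothing, and splitting sentences once the subtoken budget is exhausted). It is an approximation for illustration - prefer the real script in `scripts/`:

```python
# usage sketch: python3 preprocess_sketch.py <input file> <model name or path> <max len>
import sys

from transformers import AutoTokenizer

dataset_path, model_name_or_path, max_len = sys.argv[1], sys.argv[2], int(sys.argv[3])

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
# Leave room for the special tokens the model adds ([CLS], [SEP], ...)
max_len -= tokenizer.num_special_tokens_to_add()

subword_len_counter = 0
with open(dataset_path, "rt") as f_p:
    for line in f_p:
        line = line.rstrip()
        if not line:
            print(line)  # keep sentence boundaries
            subword_len_counter = 0
            continue
        token = line.split()[0]
        current_subwords_len = len(tokenizer.tokenize(token))
        # Control characters tokenize to nothing: drop the whole line (step a)
        if current_subwords_len == 0:
            continue
        # Subtoken budget exhausted: start a new, split sentence (step b)
        if (subword_len_counter + current_subwords_len) > max_len:
            print("")
            subword_len_counter = current_subwords_len
        else:
            subword_len_counter += current_subwords_len
        print(line)
```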
#### Prepare the run

Additional environment variables must be set:

```bash
export OUTPUT_DIR=germeval-model
export BATCH_SIZE=32
export NUM_EPOCHS=3
export SAVE_STEPS=750
export SEED=1
```

#### Run the Pytorch version

To start training, just run:

```bash
python3 run_ner.py --data_dir ./ \
--labels ./labels.txt \
--model_name_or_path $BERT_MODEL \
--output_dir $OUTPUT_DIR \
--max_seq_length $MAX_LENGTH \
--num_train_epochs $NUM_EPOCHS \
--per_device_train_batch_size $BATCH_SIZE \
--save_steps $SAVE_STEPS \
--seed $SEED \
--do_train \
--do_eval \
--do_predict
```

If your GPU supports half-precision training, just add the `--fp16` flag. After training, the model will be evaluated on both the development and test datasets.

#### JSON-based configuration file

Instead of passing all parameters via commandline arguments, the `run_ner.py` script also supports reading parameters from a json-based configuration file:

```json
{
    "data_dir": ".",
    "labels": "./labels.txt",
    "model_name_or_path": "google-bert/bert-base-multilingual-cased",
    "output_dir": "germeval-model",
    "max_seq_length": 128,
    "num_train_epochs": 3,
    "per_device_train_batch_size": 32,
    "save_steps": 750,
    "seed": 1,
    "do_train": true,
    "do_eval": true,
    "do_predict": true
}
```

It must be saved with a `.json` extension and can be used by running `python3 run_ner.py config.json`.

#### Evaluation

Evaluation on the development dataset outputs the following for our example:

```bash
10/04/2019 00:42:06 - INFO - __main__ - ***** Eval results *****
10/04/2019 00:42:06 - INFO - __main__ - f1 = 0.8623348017621146
10/04/2019 00:42:06 - INFO - __main__ - loss = 0.07183869666975543
10/04/2019 00:42:06 - INFO - __main__ - precision = 0.8467916366258111
10/04/2019 00:42:06 - INFO - __main__ - recall = 0.8784592370979806
```

On the test dataset the following results could be achieved:

```bash
10/04/2019 00:42:42 - INFO - __main__ - ***** Eval results *****
10/04/2019 00:42:42 - INFO - __main__ - f1 = 0.8614389652384803
10/04/2019 00:42:42 - INFO - __main__ - loss = 0.07064602487454782
10/04/2019 00:42:42 - INFO - __main__ - precision = 0.8604651162790697
10/04/2019 00:42:42 - INFO - __main__ - recall = 0.8624150210424085
```

#### Run the Tensorflow 2 version

To start training, just run:

```bash
python3 run_tf_ner.py --data_dir ./ \
--labels ./labels.txt \
--model_name_or_path $BERT_MODEL \
--output_dir $OUTPUT_DIR \
--max_seq_length $MAX_LENGTH \
--num_train_epochs $NUM_EPOCHS \
--per_device_train_batch_size $BATCH_SIZE \
--save_steps $SAVE_STEPS \
--seed $SEED \
--do_train \
--do_eval \
--do_predict
```

As with the Pytorch version, if your GPU supports half-precision training, just add the `--fp16` flag. After training, the model will be evaluated on both the development and test datasets.

#### Evaluation

Evaluation on the development dataset outputs the following for our example:

```bash
           precision    recall  f1-score   support

 LOCderiv     0.7619    0.6154    0.6809        52
  PERpart     0.8724    0.8997    0.8858      4057
  OTHpart     0.9360    0.9466    0.9413       711
  ORGpart     0.7015    0.6989    0.7002       269
  LOCpart     0.7668    0.8488    0.8057       496
      LOC     0.8745    0.9191    0.8963       235
 ORGderiv     0.7723    0.8571    0.8125        91
 OTHderiv     0.4800    0.6667    0.5581        18
      OTH     0.5789    0.6875    0.6286        16
 PERderiv     0.5385    0.3889    0.4516        18
      PER     0.5000    0.5000    0.5000         2
      ORG     0.0000    0.0000    0.0000         3

micro avg     0.8574    0.8862    0.8715      5968
macro avg     0.8575    0.8862    0.8713      5968
```

On the test dataset the following results could be achieved:

```bash
           precision    recall  f1-score   support

  PERpart     0.8847    0.8944    0.8896      9397
  OTHpart     0.9376    0.9353    0.9365      1639
  ORGpart     0.7307    0.7044    0.7173       697
      LOC     0.9133    0.9394    0.9262       561
  LOCpart     0.8058    0.8157    0.8107      1150
      ORG     0.0000    0.0000    0.0000         8
 OTHderiv     0.5882    0.4762    0.5263        42
 PERderiv     0.6571    0.5227    0.5823        44
      OTH     0.4906    0.6667    0.5652        39
 ORGderiv     0.7016    0.7791    0.7383       172
 LOCderiv     0.8256    0.6514    0.7282       109
      PER     0.0000    0.0000    0.0000        11

micro avg     0.8722    0.8774    0.8748     13869
macro avg     0.8712    0.8774    0.8740     13869
```

### Emerging and Rare Entities task: WNUT’17 (English NER) dataset

Description of the WNUT’17 task from the [shared task website](http://noisy-text.github.io/2017/index.html):

> The WNUT’17 shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions.
> Named entities form the basis of many modern approaches to other tasks (like event clustering and summarization), but recall on
> them is a real problem in noisy text - even among annotators. This drop tends to be due to novel entities and surface forms.

Six labels are available in the dataset. An overview can be found on this [page](http://noisy-text.github.io/2017/files/).

#### Data (Download and pre-processing steps)

The dataset can be downloaded from the [official GitHub](https://github.com/leondz/emerging_entities_17) repository.

The following commands show how to prepare the dataset for fine-tuning:

```bash
mkdir -p data_wnut_17

curl -L 'https://github.com/leondz/emerging_entities_17/raw/master/wnut17train.conll' | tr '\t' ' ' > data_wnut_17/train.txt.tmp
curl -L 'https://github.com/leondz/emerging_entities_17/raw/master/emerging.dev.conll' | tr '\t' ' ' > data_wnut_17/dev.txt.tmp
curl -L 'https://raw.githubusercontent.com/leondz/emerging_entities_17/master/emerging.test.annotated' | tr '\t' ' ' > data_wnut_17/test.txt.tmp
```

Let's define some variables that we need for further pre-processing steps:

```bash
export MAX_LENGTH=128
export BERT_MODEL=google-bert/bert-large-cased
```

Here we use the English BERT large model for fine-tuning.
The `preprocess.py` script splits longer sentences into smaller ones (once the max. subtoken length is reached):

```bash
python3 scripts/preprocess.py data_wnut_17/train.txt.tmp $BERT_MODEL $MAX_LENGTH > data_wnut_17/train.txt
python3 scripts/preprocess.py data_wnut_17/dev.txt.tmp $BERT_MODEL $MAX_LENGTH > data_wnut_17/dev.txt
python3 scripts/preprocess.py data_wnut_17/test.txt.tmp $BERT_MODEL $MAX_LENGTH > data_wnut_17/test.txt
```

In the last pre-processing step, the `labels.txt` file needs to be generated. This file contains all available labels:

```bash
cat data_wnut_17/train.txt data_wnut_17/dev.txt data_wnut_17/test.txt | cut -d " " -f 2 | grep -v "^$"| sort | uniq > data_wnut_17/labels.txt
```

#### Run the Pytorch version

Fine-tuning with the PyTorch version can be started using the `run_ner.py` script. In this example we use a JSON-based configuration file.

This configuration file looks like:

```json
{
    "data_dir": "./data_wnut_17",
    "labels": "./data_wnut_17/labels.txt",
    "model_name_or_path": "google-bert/bert-large-cased",
    "output_dir": "wnut-17-model-1",
    "max_seq_length": 128,
    "num_train_epochs": 3,
    "per_device_train_batch_size": 32,
    "save_steps": 425,
    "seed": 1,
    "do_train": true,
    "do_eval": true,
    "do_predict": true,
    "fp16": false
}
```

If your GPU supports half-precision training, please set `fp16` to `true`.

Save this JSON-based configuration under `wnut_17.json`. The fine-tuning can be started with `python3 run_ner_old.py wnut_17.json`.
#### Evaluation

Evaluation on the development dataset outputs the following:

```bash
05/29/2020 23:33:44 - INFO - __main__ - ***** Eval results *****
05/29/2020 23:33:44 - INFO - __main__ - eval_loss = 0.26505235286212275
05/29/2020 23:33:44 - INFO - __main__ - eval_precision = 0.7008264462809918
05/29/2020 23:33:44 - INFO - __main__ - eval_recall = 0.507177033492823
05/29/2020 23:33:44 - INFO - __main__ - eval_f1 = 0.5884802220680084
05/29/2020 23:33:44 - INFO - __main__ - epoch = 3.0
```

On the test dataset the following results could be achieved:

```bash
05/29/2020 23:33:44 - INFO - transformers.trainer - ***** Running Prediction *****
05/29/2020 23:34:02 - INFO - __main__ - eval_loss = 0.30948806500973547
05/29/2020 23:34:02 - INFO - __main__ - eval_precision = 0.5840108401084011
05/29/2020 23:34:02 - INFO - __main__ - eval_recall = 0.3994439295644115
05/29/2020 23:34:02 - INFO - __main__ - eval_f1 = 0.47440836543753434
```

WNUT’17 is a very difficult task. Current state-of-the-art results on this dataset can be found [here](https://nlpprogress.com/english/named_entity_recognition.html).
transformers/examples/legacy/token-classification/README.md/0
{ "file_path": "transformers/examples/legacy/token-classification/README.md", "repo_id": "transformers", "token_count": 4566 }
61
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that zamba does not have the `apply_rotary_pos_emb` function!

from typing import Optional, Tuple

import torch
from torch import nn


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class TestAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".

    Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
    The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
    The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the
    previous (mamba) layer (see fig. 2 in https://arxiv.org/pdf/2405.16712).
    Additionally, replaced
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
    """

    def __init__(self):
        pass

    def forward(self) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        _ = apply_rotary_pos_emb(1, 1, 1, 1)
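# Usage sketch (not part of the generated file above): the shapes `apply_rotary_pos_emb`
# expects. With the default unsqueeze_dim=1, cos/sin of shape [batch, seq_len, head_dim]
# broadcast over the heads dimension of q/k shaped [batch, heads, seq_len, head_dim].
if __name__ == "__main__":
    batch, heads, seq_len, head_dim = 2, 4, 6, 8
    q = torch.randn(batch, heads, seq_len, head_dim)
    k = torch.randn(batch, heads, seq_len, head_dim)
    angles = torch.randn(batch, seq_len, head_dim)
    cos, sin = angles.cos(), angles.sin()
    q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
    assert q_embed.shape == q.shape and k_embed.shape == k.shape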
transformers/examples/modular-transformers/modeling_add_function.py/0
{ "file_path": "transformers/examples/modular-transformers/modeling_add_function.py", "repo_id": "transformers", "token_count": 1596 }
62
""" Here, because clip is not consistent with the use of the "Text" and "Vision" prefixes, we cannot simply use ``` class Multimodal2VisionModel(CLIPVisionModel): pass ``` with the hope that all dependencies will be renamed as `Multimodal2VisionClass`. For this reason, if we want consistency and use the "Vision" part everywhere, we need to overwrite the intermediate classes and add the prefix everytime. This adds noise to the modular, but is unfortunately unavoidable. """ from torch import nn from transformers.models.clip.modeling_clip import ( CLIPMLP, CLIPAttention, CLIPEncoder, CLIPEncoderLayer, CLIPFlashAttention2, CLIPPreTrainedModel, CLIPSdpaAttention, CLIPVisionModel, CLIPVisionTransformer, ) from transformers.utils import add_start_docstrings class Multimodal2VisionAttention(CLIPAttention): pass # Check that adding the second base class correctly set the parent, even though in clip it does not have the "Vision" part class Multimodal2VisionSdpaAttention(CLIPSdpaAttention, Multimodal2VisionAttention): pass # Check that adding the second base class correctly set the parent, even though in clip it does not have the "Vision" part class Multimodal2VisionFlashAttention2(CLIPFlashAttention2, Multimodal2VisionAttention): pass MULTIMODAL2_VISION_ATTENTION_CLASSES = { "eager": Multimodal2VisionAttention, "sdpa": Multimodal2VisionSdpaAttention, "flash_attention_2": Multimodal2VisionFlashAttention2, } class Multimodal2VisionMLP(CLIPMLP): pass class Multimodal2VisionEncoderLayer(CLIPEncoderLayer): def __init__(self, config): super().__init__() self.self_attn = MULTIMODAL2_VISION_ATTENTION_CLASSES[config._attn_implementation](config) self.mlp = Multimodal2VisionMLP(config) class Multimodal2VisionEncoder(CLIPEncoder): def __init__(self, config): super().__init__(config) self.layers = nn.ModuleList([Multimodal2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) # Finally here the `Vision` part was correct in CLIP, but we still need to tell it that the encoder arg should use it as well class Multimodal2VisionTransformer(CLIPVisionTransformer): def __init__(self, config): super().__init__(config) self.encoder = Multimodal2VisionEncoder(config) class Multimodal2VisionPreTrainedModel(CLIPPreTrainedModel): def _init_weights(self, module): if isinstance(module, Multimodal2VisionMLP): pass MULTIMODAL2_VISION_START_DOCSTRING = "doc" # Here the only arg `self.vision_model = CLIPVisionTransformer(config)` in CLIPVisionModel already has the "Vision" part, so # no need to overwrite it, it will look for `Multimodal2VisionTransformer` which has already being redefined above # Note: we may want to redefine decorator as well for full consistency, as CLIP does not use "CLIP_VISION_START_DOCSTRING" but only # "CLIP_START_DOCSTRING" @add_start_docstrings("New doc", MULTIMODAL2_VISION_START_DOCSTRING) class Multimodal2VisionModel(CLIPVisionModel, Multimodal2VisionPreTrainedModel): _no_split_modules = ["Multimodal2VisionEncoderLayer"]
transformers/examples/modular-transformers/modular_multimodal2.py/0
{ "file_path": "transformers/examples/modular-transformers/modular_multimodal2.py", "repo_id": "transformers", "token_count": 1094 }
63
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning 🤗 Transformers model for object detection with Accelerate.""" import argparse import json import logging import math import os from functools import partial from pathlib import Path from typing import Any, List, Mapping, Tuple, Union import albumentations as A import datasets import numpy as np import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from torchmetrics.detection.mean_ap import MeanAveragePrecision from tqdm.auto import tqdm import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForObjectDetection, SchedulerType, get_scheduler, ) from transformers.image_processing_utils import BatchFeature from transformers.image_transforms import center_to_corners_format from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") logging.basicConfig(level=logging.INFO) logger = get_logger(__name__) require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") # Copied from examples/pytorch/object-detection/run_object_detection.format_image_annotations_as_coco def format_image_annotations_as_coco( image_id: str, categories: List[int], areas: List[float], bboxes: List[Tuple[float]] ) -> dict: """Format one set of image annotations to the COCO format Args: image_id (str): image id. e.g. "0001" categories (List[int]): list of categories/class labels corresponding to provided bounding boxes areas (List[float]): list of corresponding areas to provided bounding boxes bboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format ([center_x, center_y, width, height] in absolute coordinates) Returns: dict: { "image_id": image id, "annotations": list of formatted annotations } """ annotations = [] for category, area, bbox in zip(categories, areas, bboxes): formatted_annotation = { "image_id": image_id, "category_id": category, "iscrowd": 0, "area": area, "bbox": list(bbox), } annotations.append(formatted_annotation) return { "image_id": image_id, "annotations": annotations, } # Copied from examples/pytorch/object-detection/run_object_detection.convert_bbox_yolo_to_pascal def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: """ Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1] to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates. 
Args: boxes (torch.Tensor): Bounding boxes in YOLO format image_size (Tuple[int, int]): Image size in format (height, width) Returns: torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max) """ # convert center to corners format boxes = center_to_corners_format(boxes) # convert to absolute coordinates height, width = image_size boxes = boxes * torch.tensor([[width, height, width, height]]) return boxes # Copied from examples/pytorch/object-detection/run_object_detection.augment_and_transform_batch def augment_and_transform_batch( examples: Mapping[str, Any], transform: A.Compose, image_processor: AutoImageProcessor, return_pixel_mask: bool = False, ) -> BatchFeature: """Apply augmentations and format annotations in COCO format for object detection task""" images = [] annotations = [] for image_id, image, objects in zip(examples["image_id"], examples["image"], examples["objects"]): image = np.array(image.convert("RGB")) # apply augmentations output = transform(image=image, bboxes=objects["bbox"], category=objects["category"]) images.append(output["image"]) # format annotations in COCO format formatted_annotations = format_image_annotations_as_coco( image_id, output["category"], objects["area"], output["bboxes"] ) annotations.append(formatted_annotations) # Apply the image processor transformations: resizing, rescaling, normalization result = image_processor(images=images, annotations=annotations, return_tensors="pt") if not return_pixel_mask: result.pop("pixel_mask", None) return result # Copied from examples/pytorch/object-detection/run_object_detection.collate_fn def collate_fn(batch: List[BatchFeature]) -> Mapping[str, Union[torch.Tensor, List[Any]]]: data = {} data["pixel_values"] = torch.stack([x["pixel_values"] for x in batch]) data["labels"] = [x["labels"] for x in batch] if "pixel_mask" in batch[0]: data["pixel_mask"] = torch.stack([x["pixel_mask"] for x in batch]) return data def nested_to_cpu(objects): """Move nested tesnors in objects to CPU if they are on GPU""" if isinstance(objects, torch.Tensor): return objects.cpu() elif isinstance(objects, Mapping): return type(objects)({k: nested_to_cpu(v) for k, v in objects.items()}) elif isinstance(objects, (list, tuple)): return type(objects)([nested_to_cpu(v) for v in objects]) elif isinstance(objects, (np.ndarray, str, int, float, bool)): return objects raise ValueError(f"Unsupported type {type(objects)}") def evaluation_loop( model: torch.nn.Module, image_processor: AutoImageProcessor, accelerator: Accelerator, dataloader: DataLoader, id2label: Mapping[int, str], ) -> dict: model.eval() metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True) for step, batch in enumerate(tqdm(dataloader, disable=not accelerator.is_local_main_process)): with torch.no_grad(): outputs = model(**batch) # For metric computation we need to collect ground truth and predicted boxes in the same format # 1. Collect predicted boxes, classes, scores # image_processor convert boxes from YOLO format to Pascal VOC format # ([x_min, y_min, x_max, y_max] in absolute coordinates) image_size = torch.stack([example["orig_size"] for example in batch["labels"]], dim=0) predictions = image_processor.post_process_object_detection(outputs, threshold=0.0, target_sizes=image_size) predictions = nested_to_cpu(predictions) # 2. 
Collect ground truth boxes in the same format for metric computation # Do the same, convert YOLO boxes to Pascal VOC format target = [] for label in batch["labels"]: label = nested_to_cpu(label) boxes = convert_bbox_yolo_to_pascal(label["boxes"], label["orig_size"]) labels = label["class_labels"] target.append({"boxes": boxes, "labels": labels}) metric.update(predictions, target) metrics = metric.compute() # Replace list of per class metrics with separate metric for each class classes = metrics.pop("classes") map_per_class = metrics.pop("map_per_class") mar_100_per_class = metrics.pop("mar_100_per_class") for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class): class_name = id2label[class_id.item()] metrics[f"map_{class_name}"] = class_map metrics[f"mar_100_{class_name}"] = class_mar # Convert metrics to float metrics = {k: round(v.item(), 4) for k, v in metrics.items()} return metrics def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model for object detection task") parser.add_argument( "--model_name_or_path", type=str, help="Path to a pretrained model or model identifier from huggingface.co/models.", default="facebook/detr-resnet-50", ) parser.add_argument( "--dataset_name", type=str, help="Name of the dataset on the hub.", default="cppe-5", ) parser.add_argument( "--train_val_split", type=float, default=0.15, help="Fraction of the dataset to be used for validation.", ) parser.add_argument( "--ignore_mismatched_sizes", action="store_true", help="Ignore mismatched sizes between the model and the dataset.", ) parser.add_argument( "--image_square_size", type=int, default=1333, help="Image longest size will be resized to this value, then image will be padded to square.", ) parser.add_argument( "--use_fast", type=bool, default=True, help="Use a fast torchvision-base image processor if it is supported for a given model.", ) parser.add_argument( "--cache_dir", type=str, help="Path to a folder in which the model and dataset will be cached.", ) parser.add_argument( "--use_auth_token", action="store_true", help="Whether to use an authentication token to access the model repository.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=4, help="Number of workers to use for the dataloaders.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="Beta1 for AdamW optimizer", ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="Beta2 for AdamW optimizer", ) parser.add_argument( "--adam_epsilon", type=float, default=1e-8, help="Epsilon for AdamW optimizer", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", required=False, action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.push_to_hub or args.with_tracking: if args.output_dir is None: raise ValueError( "Need an `output_dir` to create a repo when `--push_to_hub` or `with_tracking` is specified." ) if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_object_detection_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. 
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. # We set device_specific to True as we want different data augmentation per device. if args.seed is not None: set_seed(args.seed, device_specific=True) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir, trust_remote_code=args.trust_remote_code) # If we don't have a validation split, split off a percentage of train as validation. 
args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split, seed=args.seed) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Get dataset categories and prepare mappings for label_name <-> label_id categories = dataset["train"].features["objects"].feature["category"].names id2label = dict(enumerate(categories)) label2id = {v: k for k, v in id2label.items()} # ------------------------------------------------------------------------------------------------ # Load pretrained config, model and image processor # ------------------------------------------------------------------------------------------------ common_pretrained_args = { "cache_dir": args.cache_dir, "token": args.hub_token, "trust_remote_code": args.trust_remote_code, } config = AutoConfig.from_pretrained( args.model_name_or_path, label2id=label2id, id2label=id2label, **common_pretrained_args ) model = AutoModelForObjectDetection.from_pretrained( args.model_name_or_path, config=config, ignore_mismatched_sizes=args.ignore_mismatched_sizes, **common_pretrained_args, ) image_processor = AutoImageProcessor.from_pretrained( args.model_name_or_path, do_resize=True, size={"max_height": args.image_square_size, "max_width": args.image_square_size}, do_pad=True, pad_size={"height": args.image_square_size, "width": args.image_square_size}, use_fast=args.use_fast, **common_pretrained_args, ) # ------------------------------------------------------------------------------------------------ # Define image augmentations and dataset transforms # ------------------------------------------------------------------------------------------------ max_size = args.image_square_size train_augment_and_transform = A.Compose( [ A.Compose( [ A.SmallestMaxSize(max_size=max_size, p=1.0), A.RandomSizedBBoxSafeCrop(height=max_size, width=max_size, p=1.0), ], p=0.2, ), A.OneOf( [ A.Blur(blur_limit=7, p=0.5), A.MotionBlur(blur_limit=7, p=0.5), A.Defocus(radius=(1, 5), alias_blur=(0.1, 0.25), p=0.1), ], p=0.1, ), A.Perspective(p=0.1), A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.5), A.HueSaturationValue(p=0.1), ], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True, min_area=25), ) validation_transform = A.Compose( [A.NoOp()], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True), ) # Make transform functions for batch and apply for dataset splits train_transform_batch = partial( augment_and_transform_batch, transform=train_augment_and_transform, image_processor=image_processor ) validation_transform_batch = partial( augment_and_transform_batch, transform=validation_transform, image_processor=image_processor ) with accelerator.main_process_first(): train_dataset = dataset["train"].with_transform(train_transform_batch) valid_dataset = dataset["validation"].with_transform(validation_transform_batch) test_dataset = dataset["test"].with_transform(validation_transform_batch) dataloader_common_args = { "num_workers": args.dataloader_num_workers, "collate_fn": collate_fn, } train_dataloader = DataLoader( train_dataset, shuffle=True, batch_size=args.per_device_train_batch_size, **dataloader_common_args ) valid_dataloader = DataLoader( valid_dataset, shuffle=False, batch_size=args.per_device_eval_batch_size, **dataloader_common_args ) test_dataloader = DataLoader( test_dataset, shuffle=False, 
        batch_size=args.per_device_eval_batch_size,
        **dataloader_common_args,
    )

    # ------------------------------------------------------------------------------------------------
    # Define optimizer, scheduler and prepare everything with the accelerator
    # ------------------------------------------------------------------------------------------------

    # Optimizer
    optimizer = torch.optim.AdamW(
        list(model.parameters()),
        lr=args.learning_rate,
        betas=[args.adam_beta1, args.adam_beta2],
        eps=args.adam_epsilon,
    )

    # Figure out after how many steps we should save the Accelerator states
    checkpointing_steps = args.checkpointing_steps
    if checkpointing_steps is not None and checkpointing_steps.isdigit():
        checkpointing_steps = int(checkpointing_steps)

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps
        if overrode_max_train_steps
        else args.max_train_steps * accelerator.num_processes,
    )

    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, valid_dataloader, test_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, test_dataloader, lr_scheduler
    )

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if args.with_tracking:
        experiment_config = vars(args)
        # TensorBoard cannot log Enums, need the raw value
        experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
        accelerator.init_trackers("object_detection_no_trainer", experiment_config)

    # ------------------------------------------------------------------------------------------------
    # Run training with evaluation on each epoch
    # ------------------------------------------------------------------------------------------------

    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            checkpoint_path = args.resume_from_checkpoint
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
            checkpoint_path = path
            path = os.path.basename(checkpoint_path)

        accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
        accelerator.load_state(checkpoint_path)
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
            completed_steps = starting_epoch * num_update_steps_per_epoch
        else:
            # need to multiply `gradient_accumulation_steps` to reflect real steps
            resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
            starting_epoch = resume_step // len(train_dataloader)
            completed_steps = resume_step // args.gradient_accumulation_steps
            resume_step -= starting_epoch * len(train_dataloader)

    # update the progress_bar if resuming from a checkpoint
    progress_bar.update(completed_steps)

    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We skip the first `n` batches in the dataloader when resuming from a checkpoint
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
        else:
            active_dataloader = train_dataloader

        for step, batch in enumerate(active_dataloader):
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                # We keep track of the loss at each epoch
                if args.with_tracking:
                    total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1

            if isinstance(checkpointing_steps, int):
                if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
                    output_dir = f"step_{completed_steps}"
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)

                    if args.push_to_hub and epoch < args.num_train_epochs - 1:
                        accelerator.wait_for_everyone()
                        unwrapped_model = accelerator.unwrap_model(model)
                        unwrapped_model.save_pretrained(
                            args.output_dir,
                            is_main_process=accelerator.is_main_process,
                            save_function=accelerator.save,
                        )
                        if accelerator.is_main_process:
                            image_processor.save_pretrained(args.output_dir)
                            api.upload_folder(
                                commit_message=f"Training in progress epoch {epoch}",
                                folder_path=args.output_dir,
                                repo_id=repo_id,
                                repo_type="model",
                                token=args.hub_token,
                            )

            if completed_steps >= args.max_train_steps:
                break

        logger.info("***** Running evaluation *****")
        metrics = evaluation_loop(model, image_processor, accelerator, valid_dataloader, id2label)

        logger.info(f"epoch {epoch}: {metrics}")

        if args.with_tracking:
            accelerator.log(
                {
                    "train_loss": total_loss.item() / len(train_dataloader),
                    **metrics,
                    "epoch": epoch,
                    "step": completed_steps,
}, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) # ------------------------------------------------------------------------------------------------ # Run evaluation on test dataset and save the model # ------------------------------------------------------------------------------------------------ logger.info("***** Running evaluation on test dataset *****") metrics = evaluation_loop(model, image_processor, accelerator, test_dataloader, id2label) metrics = {f"test_{k}": v for k, v in metrics.items()} logger.info(f"Test metrics: {metrics}") if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(metrics, f, indent=2) image_processor.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ignore_patterns=["epoch_*"], ) if __name__ == "__main__": main()
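

# Example invocation (a minimal sketch; the checkpoint and dataset names below are
# illustrative, not prescribed by this script -- any object-detection checkpoint and
# any dataset with COCO-style `objects` annotations should work):
#
#   accelerate launch run_object_detection_no_trainer.py \
#       --model_name_or_path facebook/detr-resnet-50 \
#       --dataset_name cppe-5 \
#       --image_square_size 600 \
#       --output_dir detr-finetuned \
#       --with_tracking \
#       --push_to_hub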
transformers/examples/pytorch/object-detection/run_object_detection_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/object-detection/run_object_detection_no_trainer.py", "repo_id": "transformers", "token_count": 13163 }
64
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Speech Recognition Pre-Training

## Wav2Vec2 Speech Pre-Training

The script [`run_wav2vec2_pretraining_no_trainer.py`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py) can be used to pre-train a [Wav2Vec2](https://huggingface.co/transformers/model_doc/wav2vec2.html?highlight=wav2vec2) model from scratch.

In the script [`run_wav2vec2_pretraining_no_trainer`](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py), a Wav2Vec2 model is pre-trained on audio data alone using [Wav2Vec2's contrastive loss objective](https://arxiv.org/abs/2006.11477).

The following examples show how to pre-train a `"base"`-sized Wav2Vec2 model as well as a `"large"`-sized Wav2Vec2 model using [`accelerate`](https://github.com/huggingface/accelerate).

---
**NOTE 1**

Wav2Vec2's pre-training is known to be quite unstable.
It is advised to do a couple of test runs with a smaller dataset, *i.e.* `--dataset_config_names clean clean`, `--dataset_split_names validation test`,
to find good hyper-parameters for `learning_rate`, `batch_size`, `num_warmup_steps`, and the optimizer.
A good metric to observe during training is the gradient norm, which should ideally be between 0.5 and 2.

---

---
**NOTE 2**

When training a model on large datasets it is recommended to run the data preprocessing
in a first run in a **non-distributed** mode via `--preprocessing_only` so that when running the model in
**distributed** mode in a second step the preprocessed data can easily be loaded on each distributed device.

---

### Demo

In this demo run we pre-train a `"base"`-sized Wav2Vec2 model only on the validation and test data of [librispeech_asr](https://huggingface.co/datasets/librispeech_asr).

The demo is run on two Titan RTX GPUs (24 GB RAM each). In case you have less RAM available per device, consider reducing `--per_device_train_batch_size` and/or the `--max_duration_in_seconds`.
```bash
accelerate launch run_wav2vec2_pretraining_no_trainer.py \
    --dataset_name="librispeech_asr" \
    --dataset_config_names clean clean \
    --dataset_split_names validation test \
    --model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
    --output_dir="./wav2vec2-pretrained-demo" \
    --max_train_steps="20000" \
    --num_warmup_steps="32000" \
    --gradient_accumulation_steps="8" \
    --learning_rate="0.005" \
    --weight_decay="0.01" \
    --max_duration_in_seconds="20.0" \
    --min_duration_in_seconds="2.0" \
    --logging_steps="1" \
    --saving_steps="10000" \
    --per_device_train_batch_size="8" \
    --per_device_eval_batch_size="8" \
    --adam_beta1="0.9" \
    --adam_beta2="0.98" \
    --adam_epsilon="1e-06" \
    --gradient_checkpointing \
    --mask_time_prob="0.65" \
    --mask_time_length="10"
```

The results of this run can be seen [here](https://wandb.ai/patrickvonplaten/wav2vec2-pretrained-demo/reports/Wav2Vec2-PreTraining-Demo-Run--VmlldzoxMDk3MjAw?accessToken=oa05s1y57lizo2ocxy3k01g6db1u4pt8m6ur2n8nl4cb0ug02ms2cw313kb8ruch).

### Base

To pre-train a `"base"`-sized Wav2Vec2 model, *e.g.* [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base), on [librispeech_asr](https://huggingface.co/datasets/librispeech_asr), the following command can be run:

```bash
accelerate launch run_wav2vec2_pretraining_no_trainer.py \
    --dataset_name=librispeech_asr \
    --dataset_config_names clean clean other \
    --dataset_split_names train.100 train.360 train.500 \
    --model_name_or_path="patrickvonplaten/wav2vec2-base-v2" \
    --output_dir="./wav2vec2-pretrained-demo" \
    --max_train_steps="200000" \
    --num_warmup_steps="32000" \
    --gradient_accumulation_steps="4" \
    --learning_rate="0.001" \
    --weight_decay="0.01" \
    --max_duration_in_seconds="20.0" \
    --min_duration_in_seconds="2.0" \
    --logging_steps="1" \
    --saving_steps="10000" \
    --per_device_train_batch_size="8" \
    --per_device_eval_batch_size="8" \
    --adam_beta1="0.9" \
    --adam_beta2="0.98" \
    --adam_epsilon="1e-06" \
    --gradient_checkpointing \
    --mask_time_prob="0.65" \
    --mask_time_length="10"
```

The experiment was run on 8 V100 GPUs (16 GB RAM each) for 4 days. In case you have more than 8 GPUs available for a higher effective `batch_size`, it is recommended to increase the `learning_rate` to `0.005` for faster convergence.
The results of this run can be seen [here](https://wandb.ai/patrickvonplaten/test/reports/Wav2Vec2-Base--VmlldzoxMTUyODQ0?accessToken=rg6e8u9yizx964k8q47zctq1m4afpvtn1i3qi9exgdmzip6xwkfzvagfajpzj55n) and the checkpoint pretrained for 85,000 steps can be accessed [here](https://huggingface.co/patrickvonplaten/wav2vec2-base-repro-960h-libri-85k-steps).

### Large

To pre-train a `"large"`-sized Wav2Vec2 model, *e.g.* [facebook/wav2vec2-large-lv60](https://huggingface.co/facebook/wav2vec2-large-lv60), on [librispeech_asr](https://huggingface.co/datasets/librispeech_asr), the following command can be run:

```bash
accelerate launch run_wav2vec2_pretraining_no_trainer.py \
    --dataset_name=librispeech_asr \
    --dataset_config_names clean clean other \
    --dataset_split_names train.100 train.360 train.500 \
    --output_dir=./test \
    --max_train_steps=200000 \
    --num_warmup_steps=32000 \
    --gradient_accumulation_steps=8 \
    --learning_rate=0.001 \
    --weight_decay=0.01 \
    --max_duration_in_seconds=20.0 \
    --min_duration_in_seconds=2.0 \
    --model_name_or_path=./ \
    --logging_steps=1 \
    --saving_steps=10000 \
    --per_device_train_batch_size=2 \
    --per_device_eval_batch_size=4 \
    --adam_beta1=0.9 \
    --adam_beta2=0.98 \
    --adam_epsilon=1e-06 \
    --gradient_checkpointing \
    --mask_time_prob=0.65 \
    --mask_time_length=10
```

The experiment was run on 8 V100 GPUs (16 GB RAM each) for 7 days. In case you have more than 8 GPUs available for a higher effective `batch_size`, it is recommended to increase the `learning_rate` to `0.005` for faster convergence.

The results of this run can be seen [here](https://wandb.ai/patrickvonplaten/pretraining-wav2vec2/reports/Wav2Vec2-Large--VmlldzoxMTAwODM4?accessToken=wm3qzcnldrwsa31tkvf2pdmilw3f63d4twtffs86ou016xjbyilh55uoi3mo1qzc) and the checkpoint pretrained for 120,000 steps can be accessed [here](https://huggingface.co/patrickvonplaten/wav2vec2-large-repro-960h-libri-120k-steps).
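
### Using a pre-trained checkpoint

Once pre-training has finished, the resulting checkpoint can be loaded for inspection or to warm-start downstream fine-tuning. A minimal sketch, using the 85k-step base checkpoint linked above:

```python
from transformers import Wav2Vec2ForCTC, Wav2Vec2ForPreTraining

# Inspect the pre-trained model itself
model = Wav2Vec2ForPreTraining.from_pretrained("patrickvonplaten/wav2vec2-base-repro-960h-libri-85k-steps")

# Or warm-start a CTC model for ASR fine-tuning; the `lm_head` is randomly
# initialized and still needs to be fine-tuned on labeled audio.
asr_model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-repro-960h-libri-85k-steps")
```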
transformers/examples/pytorch/speech-pretraining/README.md/0
{ "file_path": "transformers/examples/pytorch/speech-pretraining/README.md", "repo_id": "transformers", "token_count": 2600 }
65
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning the library models for text classification.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import List, Optional import datasets import evaluate import numpy as np from datasets import Value, load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) do_regression: bool = field( default=None, metadata={ "help": "Whether to do regression instead of classification. If None, will be inferred from the dataset." }, ) text_column_names: Optional[str] = field( default=None, metadata={ "help": ( "The name of the text column in the input dataset or a CSV/JSON file. " 'If not specified, will use the "sentence" column for single/multi-label classification task.' ) }, ) text_column_delimiter: Optional[str] = field( default=" ", metadata={"help": "The delimiter to use to join text columns into a single sentence."} ) train_split_name: Optional[str] = field( default=None, metadata={ "help": 'The name of the train split in the input dataset. If not specified, will use the "train" split when do_train is enabled' }, ) validation_split_name: Optional[str] = field( default=None, metadata={ "help": 'The name of the validation split in the input dataset. If not specified, will use the "validation" split when do_eval is enabled' }, ) test_split_name: Optional[str] = field( default=None, metadata={ "help": 'The name of the test split in the input dataset. 
If not specified, will use the "test" split when do_predict is enabled'
        },
    )
    remove_splits: Optional[str] = field(
        default=None,
        metadata={"help": "The splits to remove from the dataset. Multiple splits should be separated by commas."},
    )
    remove_columns: Optional[str] = field(
        default=None,
        metadata={"help": "The columns to remove from the dataset. Multiple columns should be separated by commas."},
    )
    label_column_name: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The name of the label column in the input dataset or a CSV/JSON file. "
                'If not specified, will use the "label" column for single/multi-label classification task'
            )
        },
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    shuffle_train_dataset: bool = field(
        default=False, metadata={"help": "Whether to shuffle the train dataset or not."}
    )
    shuffle_seed: int = field(
        default=42, metadata={"help": "Random seed that will be used to shuffle the train dataset."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    metric_name: Optional[str] = field(default=None, metadata={"help": "The metric to use for evaluation."})
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is None:
            if self.train_file is None or self.validation_file is None:
                raise ValueError("Need either a dataset name or a training/validation file.")

            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
""" model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) ignore_mismatched_sizes: bool = field( default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."}, ) def get_label_list(raw_dataset, split="train") -> List[str]: """Get the list of labels from a multi-label dataset""" if isinstance(raw_dataset[split]["label"][0], list): label_list = [label for sample in raw_dataset[split]["label"] for label in sample] label_list = list(set(label_list)) else: label_list = raw_dataset[split].unique("label") # we will treat the label list as a list of string instead of int, consistent with model.config.label2id label_list = [str(label) for label in label_list] return label_list def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_classification", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files, or specify a dataset name
    # to load from huggingface/datasets. In either case, you can specify the key of the column(s) containing the text and
    # the key of the column containing the label. If multiple columns are specified for the text, they will be joined together
    # for the actual text value.
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            token=model_args.token,
            trust_remote_code=model_args.trust_remote_code,
        )
        # Print some info about the dataset
        logger.info(f"Dataset loaded: {raw_datasets}")
        logger.info(raw_datasets)
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file else: raise ValueError("Need either a dataset name or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset( "csv", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) else: # Loading a dataset from local json files raw_datasets = load_dataset( "json", data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. if data_args.remove_splits is not None: for split in data_args.remove_splits.split(","): logger.info(f"removing split {split}") raw_datasets.pop(split) if data_args.train_split_name is not None: logger.info(f"using {data_args.train_split_name} as train set") raw_datasets["train"] = raw_datasets[data_args.train_split_name] raw_datasets.pop(data_args.train_split_name) if data_args.validation_split_name is not None: logger.info(f"using {data_args.validation_split_name} as validation set") raw_datasets["validation"] = raw_datasets[data_args.validation_split_name] raw_datasets.pop(data_args.validation_split_name) if data_args.test_split_name is not None: logger.info(f"using {data_args.test_split_name} as test set") raw_datasets["test"] = raw_datasets[data_args.test_split_name] raw_datasets.pop(data_args.test_split_name) if data_args.remove_columns is not None: for split in raw_datasets.keys(): for column in data_args.remove_columns.split(","): logger.info(f"removing column {column} from split {split}") raw_datasets[split] = raw_datasets[split].remove_columns(column) if data_args.label_column_name is not None and data_args.label_column_name != "label": for key in raw_datasets.keys(): raw_datasets[key] = raw_datasets[key].rename_column(data_args.label_column_name, "label") # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = ( raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if data_args.do_regression is None else data_args.do_regression ) is_multi_label = False if is_regression: label_list = None num_labels = 1 # regession requires float as label type, let's cast it if needed for split in raw_datasets.keys(): if raw_datasets[split].features["label"].dtype not in ["float32", "float64"]: logger.warning( f"Label type for {split} set to float32, was {raw_datasets[split].features['label'].dtype}" ) features = raw_datasets[split].features features.update({"label": Value("float32")}) try: raw_datasets[split] = raw_datasets[split].cast(features) except TypeError as error: logger.error( f"Unable to cast {split} set to float32, please check the labels are correct, or maybe try with --do_regression=False" ) raise error else: # classification if raw_datasets["train"].features["label"].dtype == "list": # multi-label classification is_multi_label = True logger.info("Label type is list, doing multi-label classification") # Trying to find the number of labels in a multi-label classification task # We have to deal with common cases that labels appear in the training set but not in the validation/test set. # So we build the label list from the union of labels in train/val/test. 
        label_list = get_label_list(raw_datasets, split="train")
        for split in ["validation", "test"]:
            if split in raw_datasets:
                val_or_test_labels = get_label_list(raw_datasets, split=split)
                diff = set(val_or_test_labels).difference(set(label_list))
                if len(diff) > 0:
                    # add the labels that appear in val/test but not in train, throw a warning
                    logger.warning(
                        f"Labels {diff} in {split} set but not in training set, adding them to the label list"
                    )
                    label_list += list(diff)

        # if label is -1, we throw a warning and remove it from the label list
        for label in label_list:
            if label == -1:
                logger.warning("Label -1 found in label list, removing it.")
                label_list.remove(label)

        label_list.sort()
        num_labels = len(label_list)
        if num_labels <= 1:
            raise ValueError("You need more than one label to do classification.")

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    if is_regression:
        config.problem_type = "regression"
        logger.info("setting problem type to regression")
    elif is_multi_label:
        config.problem_type = "multi_label_classification"
        logger.info("setting problem type to multi label classification")
    else:
        config.problem_type = "single_label_classification"
        logger.info("setting problem type to single label classification")

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # for training, we will update the config with label infos,
    # if do_train is not set, we will use the label infos in the config
    if training_args.do_train and not is_regression:  # classification, training
        label_to_id = {v: i for i, v in enumerate(label_list)}
        # update config with label infos
        if model.config.label2id != label_to_id:
            logger.warning(
                "The label2id key in the model config.json is not equal to the label2id key of this "
                "run. You can ignore this if you are doing finetuning."
) model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in label_to_id.items()} elif not is_regression: # classification, but not training logger.info("using label infos in the model config") logger.info("label2id: {}".format(model.config.label2id)) label_to_id = model.config.label2id else: # regression label_to_id = None if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def multi_labels_to_ids(labels: List[str]) -> List[float]: ids = [0.0] * len(label_to_id) # BCELoss requires float as target type for label in labels: ids[label_to_id[label]] = 1.0 return ids def preprocess_function(examples): if data_args.text_column_names is not None: text_column_names = data_args.text_column_names.split(",") # join together text columns into "sentence" column examples["sentence"] = examples[text_column_names[0]] for column in text_column_names[1:]: for i in range(len(examples[column])): examples["sentence"][i] += data_args.text_column_delimiter + examples[column][i] # Tokenize the texts result = tokenizer(examples["sentence"], padding=padding, max_length=max_seq_length, truncation=True) if label_to_id is not None and "label" in examples: if is_multi_label: result["label"] = [multi_labels_to_ids(l) for l in examples["label"]] else: result["label"] = [(label_to_id[str(l)] if l != -1 else -1) for l in examples["label"]] return result # Running the preprocessing pipeline on all the datasets with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset.") train_dataset = raw_datasets["train"] if data_args.shuffle_train_dataset: logger.info("Shuffling the training dataset") train_dataset = train_dataset.shuffle(seed=data_args.shuffle_seed) if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation or test dataset if validation is not defined.") else: logger.warning("Validation dataset not found. 
Falling back to test dataset for validation.") eval_dataset = raw_datasets["test"] else: eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] # remove label column if it exists if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") if data_args.metric_name is not None: metric = ( evaluate.load(data_args.metric_name, config_name="multilabel", cache_dir=model_args.cache_dir) if is_multi_label else evaluate.load(data_args.metric_name, cache_dir=model_args.cache_dir) ) logger.info(f"Using metric {data_args.metric_name} for evaluation.") else: if is_regression: metric = evaluate.load("mse", cache_dir=model_args.cache_dir) logger.info("Using mean squared error (mse) as regression score, you can use --metric_name to overwrite.") else: if is_multi_label: metric = evaluate.load("f1", config_name="multilabel", cache_dir=model_args.cache_dir) logger.info( "Using multilabel F1 for multi-label classification task, you can use --metric_name to overwrite." ) else: metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir) logger.info("Using accuracy as classification score, you can use --metric_name to overwrite.") def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions if is_regression: preds = np.squeeze(preds) result = metric.compute(predictions=preds, references=p.label_ids) elif is_multi_label: preds = np.array([np.where(p > 0, 1, 0) for p in preds]) # convert logits to multi-hot encoding # Micro F1 is commonly used in multi-label classification result = metric.compute(predictions=preds, references=p.label_ids, average="micro") else: preds = np.argmax(preds, axis=1) result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if # we already did the padding. 
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        processing_class=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` column if it exists because it might contain -1 and Trainer won't like that.
        if "label" in predict_dataset.features:
            predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        if is_regression:
            predictions = np.squeeze(predictions)
        elif is_multi_label:
            # Convert logits to multi-hot encoding. We compare the logits to 0 instead of 0.5, because the sigmoid is not applied.
            # You can also pass `preprocess_logits_for_metrics=lambda logits, labels: nn.functional.sigmoid(logits)` to the Trainer
            # and set p > 0.5 below (less efficient in this case)
            predictions = np.array([np.where(p > 0, 1, 0) for p in predictions])
        else:
            predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    if is_regression:
                        writer.write(f"{index}\t{item:3.3f}\n")
                    elif is_multi_label:
                        # recover from multi-hot encoding
                        item = [label_list[i] for i in range(len(item)) if item[i] == 1]
                        writer.write(f"{index}\t{item}\n")
                    else:
                        item = label_list[item]
                        writer.write(f"{index}\t{item}\n")
        logger.info(f"Predict results saved at {output_predict_file}")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
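

# Example invocation (a minimal sketch; the model and dataset below are illustrative --
# any sequence-classification dataset with "sentence"/"label" columns works):
#
#   python run_classification.py \
#       --model_name_or_path google-bert/bert-base-uncased \
#       --dataset_name glue \
#       --dataset_config_name sst2 \
#       --do_train --do_eval \
#       --output_dir /tmp/sst2-classification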
transformers/examples/pytorch/text-classification/run_classification.py/0
{ "file_path": "transformers/examples/pytorch/text-classification/run_classification.py", "repo_id": "transformers", "token_count": 13914 }
66
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """
    Configuration for training model.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """
    Configuration for evaluating model.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """
    Configuration for running evaluation on HumanEval dataset.
""" model_ckpt: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."} ) num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."}) num_tasks: Optional[int] = field( default=None, metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."}, ) do_sample: Optional[bool] = field( default=True, metadata={"help": "Sample from the language model's output distribution."} ) temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."}) max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."}) top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."}) top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."}) batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."}) n_samples: Optional[int] = field( default=200, metadata={"help": "Number of completions to generate for each sample."} ) seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."}) output_file: Optional[str] = field( default="eval_results.json", metadata={"help": "Random seed used for evaluation."} ) HF_ALLOW_CODE_EVAL: Optional[str] = field( default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"} ) device_int: Optional[int] = field( default=-1, metadata={ "help": ( "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive" " number corresponds to which GPU device id to run on." ) }, ) @dataclass class PreprocessingArguments: """ Configuration for preprocessing data. """ num_workers: Optional[int] = field( default=None, metadata={ "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available." 
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of samples to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """
    Configuration for tokenizer training.
    """

    base_tokenizer: Optional[str] = field(
        default="openai-community/gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    vocab_size: Optional[int] = field(default=200_000, metadata={"help": "Vocabulary size of the new tokenizer."})
    n_examples: Optional[int] = field(
        default=32768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """
    Configuration for data pretokenization.
    """

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    """
    Configuration for initializing new model.
""" config_name: Optional[str] = field( default="openai-community/gpt2-large", metadata={"help": "Configuration to use for model initialization."} ) tokenizer_name: Optional[str] = field( default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."} ) model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."}) push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
transformers/examples/research_projects/codeparrot/scripts/arguments.py/0
{ "file_path": "transformers/examples/research_projects/codeparrot/scripts/arguments.py", "repo_id": "transformers", "token_count": 3565 }
67
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training the distilled model. """ import argparse import logging import pickle from collections import Counter logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) logger = logging.getLogger(__name__) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)" ) parser.add_argument( "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset." ) parser.add_argument( "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file." ) parser.add_argument("--vocab_size", default=30522, type=int) args = parser.parse_args() logger.info(f"Loading data from {args.data_file}") with open(args.data_file, "rb") as fp: data = pickle.load(fp) logger.info("Counting occurrences for MLM.") counter = Counter() for tk_ids in data: counter.update(tk_ids) counts = [0] * args.vocab_size for k, v in counter.items(): counts[k] = v logger.info(f"Dump to {args.token_counts_dump}") with open(args.token_counts_dump, "wb") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
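
    # Note: the distillation training script consumes this counts list to derive smoothed
    # token-masking probabilities for MLM, in the spirit of XLM/word2vec frequency smoothing
    # (roughly p_i proportional to counts[i] ** -alpha); the smoothing exponent alpha is a
    # hyper-parameter of the training run rather than something fixed here.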
transformers/examples/research_projects/distillation/scripts/token_counts.py/0
{ "file_path": "transformers/examples/research_projects/distillation/scripts/token_counts.py", "repo_id": "transformers", "token_count": 726 }
68
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Vision-Text dual encoder model training examples

> Note: This example is experimental and might not give the best possible results

The following example showcases how to train a CLIP-like vision-text dual encoder model from a pre-trained vision encoder and a pre-trained text encoder, using the JAX/Flax backend.

Such a model can be used for natural language image search and potentially zero-shot image classification. The model is inspired by the [CLIP](https://openai.com/blog/clip/) approach, introduced by Alec Radford et al. The idea is to train a vision encoder and a text encoder jointly to project the representation of images and their captions into the same embedding space, such that the caption embeddings are located near the embeddings of the images they describe.

JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism.

In this example we will use the vision model from [CLIP](https://huggingface.co/models?filter=clip) as the image encoder and [`FacebookAI/roberta-base`](https://huggingface.co/FacebookAI/roberta-base) as the text encoder. Note that one can also use the [ViT](https://huggingface.co/models?filter=vit) model as image encoder and any other BERT or RoBERTa model as text encoder. To train the model on languages other than English one should choose a text encoder trained on the desired language and an image-text dataset in that language. One such dataset is [WIT](https://github.com/google-research-datasets/wit).

Let's start by creating a model repository to save the trained model and logs. Here we call the model `"clip-roberta-base"`, but you can change the model name as you like.

You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that you are logged in) or via the command line:

```bash
huggingface-cli repo create clip-roberta-base
```

Next we clone the model repository to add the tokenizer and model files.

```bash
git clone https://huggingface.co/<your-username>/clip-roberta-base
```

To ensure that all tensorboard traces will be uploaded correctly, we need to track them. You can run the following command inside your model repo to do so.

```bash
cd clip-roberta-base
git lfs track "*tfevents*"
```

Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo.

Next, let's add a symbolic link to `run_hybrid_clip.py`.

```bash
export MODEL_DIR="./clip-roberta-base"
ln -s ~/transformers/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py run_hybrid_clip.py
```

## How to use the `FlaxHybridCLIP` model:

The `FlaxHybridCLIP` class lets you load any text and vision encoder model to create a dual encoder. Here is an example of how to load the model using pre-trained text and vision models.
```python
from modeling_hybrid_clip import FlaxHybridCLIP

model = FlaxHybridCLIP.from_text_vision_pretrained("google-bert/bert-base-uncased", "openai/clip-vit-base-patch32")

# save the model
model.save_pretrained("bert-clip")

# load the saved model
model = FlaxHybridCLIP.from_pretrained("bert-clip")
```

If the checkpoints are in PyTorch then one can pass `text_from_pt=True` and `vision_from_pt=True`. This will load the PyTorch checkpoints, convert them to Flax, and load the model.

```python
model = FlaxHybridCLIP.from_text_vision_pretrained("google-bert/bert-base-uncased", "openai/clip-vit-base-patch32", text_from_pt=True, vision_from_pt=True)
```

This loads both the text and vision encoders using pre-trained weights; the projection layers are randomly initialized, except for CLIP's vision model. If you use CLIP to initialize the vision model then the vision projection weights are also loaded using the pre-trained weights.

## Prepare the dataset

We will use the MS-COCO dataset to train our dual encoder model. MS-COCO contains over 82,000 images, each of which has at least 5 different caption annotations. The dataset is usually used for image captioning tasks, but we can repurpose the image-caption pairs to train our dual encoder model for image search.

### Download and extract the data.

It consists of two compressed folders: one with the images, and the other with the associated image captions. Note that the compressed images folder is 13GB in size.

```bash
wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip
wget http://images.cocodataset.org/zips/train2014.zip

unzip annotations_trainval2014.zip
unzip train2014.zip

mkdir coco_dataset
mv train2014 coco_dataset/
mv annotations coco_dataset/
```

### Prepare dataset files and split the dataset.

```python
import json
import collections

images_dir = "coco_dataset/train2014"
annotation_file = "coco_dataset/annotations/captions_train2014.json"
with open(annotation_file, "r") as f:
    annotations = json.load(f)["annotations"]

image_path_to_caption = collections.defaultdict(list)
for element in annotations:
    caption = f"{element['caption'].lower().rstrip('.')}"
    image_path = images_dir + "/COCO_train2014_" + "%012d.jpg" % (element["image_id"])
    image_path_to_caption[image_path].append(caption)

lines = []
for image_path, captions in image_path_to_caption.items():
    lines.append(json.dumps({"image_path": image_path, "captions": captions}))

train_lines = lines[:-8000]
valid_lines = lines[-8000:]
with open("coco_dataset/train_dataset.json", "w") as f:
    f.write("\n".join(train_lines))

with open("coco_dataset/valid_dataset.json", "w") as f:
    f.write("\n".join(valid_lines))
```

> Note: The data loading and processing part of this script can still be improved for maximum performance. In particular, one should decode the images beforehand and use those instead of decoding them each time. If the dataset is small, or if you have huge disk space, you could also pre-process the whole dataset beforehand and then use it.
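As a rough sketch of that pre-decoding idea (the output folder name and the target resolution below are assumptions, not part of the official scripts), one could resize and save every image once up front:

```python
import os

from PIL import Image

decoded_dir = "coco_dataset/decoded_224"  # hypothetical output folder
os.makedirs(decoded_dir, exist_ok=True)

for image_path in image_path_to_caption:
    # decode and resize once, so training only has to re-load small files
    image = Image.open(image_path).convert("RGB").resize((224, 224))
    image.save(os.path.join(decoded_dir, os.path.basename(image_path)))
```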
## Train the model

Next we can run the example script to train the model:

```bash
python run_hybrid_clip.py \
    --output_dir ${MODEL_DIR} \
    --text_model_name_or_path="FacebookAI/roberta-base" \
    --vision_model_name_or_path="openai/clip-vit-base-patch32" \
    --tokenizer_name="FacebookAI/roberta-base" \
    --train_file="coco_dataset/train_dataset.json" \
    --validation_file="coco_dataset/valid_dataset.json" \
    --do_train --do_eval \
    --num_train_epochs="40" --max_seq_length 96 \
    --per_device_train_batch_size="64" \
    --per_device_eval_batch_size="64" \
    --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \
    --overwrite_output_dir \
    --preprocessing_num_workers 32 \
    --push_to_hub
```

This should finish in roughly 1 hour 50 minutes with a minimum validation loss of 2.43. Training statistics can be accessed on [tensorboard.dev](https://tensorboard.dev/experiment/RUNPYd1yRgSD5kZSb9hDig/#scalars)
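Once trained, the dual encoder can be used for simple natural language image search. Below is a minimal sketch, assuming the checkpoint exposes the CLIP-style `get_text_features` method and that image embeddings were precomputed once with `get_image_features` and saved to a (hypothetical) `image_embeddings.npy` file:

```python
import jax.numpy as jnp
import numpy as np
from modeling_hybrid_clip import FlaxHybridCLIP
from transformers import AutoTokenizer

model = FlaxHybridCLIP.from_pretrained("clip-roberta-base")
tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")

# hypothetical: image embeddings precomputed with model.get_image_features(...)
image_emb = jnp.asarray(np.load("image_embeddings.npy"))

inputs = tokenizer(["a dog playing in the snow"], max_length=96, padding="max_length", return_tensors="np")
text_emb = model.get_text_features(inputs["input_ids"], inputs["attention_mask"])

# normalize and rank images by cosine similarity to the query
text_emb = text_emb / jnp.linalg.norm(text_emb, axis=-1, keepdims=True)
image_emb = image_emb / jnp.linalg.norm(image_emb, axis=-1, keepdims=True)
scores = jnp.matmul(text_emb, image_emb.T)
best_matches = jnp.argsort(-scores[0])[:5]  # indices of the 5 best-matching images
```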
transformers/examples/research_projects/jax-projects/hybrid_clip/README.md/0
{ "file_path": "transformers/examples/research_projects/jax-projects/hybrid_clip/README.md", "repo_id": "transformers", "token_count": 2322 }
69
## MM-IMDb

Based on the script [`run_mmimdb.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/mm-imdb/run_mmimdb.py).

[MM-IMDb](http://lisi1.unal.edu.co/mmimdb/) is a multimodal dataset with around 26,000 movies, including images, plots and other metadata.

### Training on MM-IMDb

```bash
python run_mmimdb.py \
    --data_dir /path/to/mmimdb/dataset/ \
    --model_type bert \
    --model_name_or_path google-bert/bert-base-uncased \
    --output_dir /path/to/save/dir/ \
    --do_train \
    --do_eval \
    --max_seq_len 512 \
    --gradient_accumulation_steps 20 \
    --num_image_embeds 3 \
    --num_train_epochs 100 \
    --patience 5
```
transformers/examples/research_projects/mm-imdb/README.md/0
{ "file_path": "transformers/examples/research_projects/mm-imdb/README.md", "repo_id": "transformers", "token_count": 287 }
70
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Bart + Beam Search to ONNX

Author: [@fatcat-z](https://github.com/fatcat-z)

This folder contains an example of exporting Bart + Beam Search generation (`BartForConditionalGeneration`) to ONNX.

Beam Search contains a for-loop workflow, so we need to make it TorchScript-compatible for exporting to ONNX. This example shows how to make a Bart model TorchScript-compatible by wrapping it up in a new model. In addition, some changes were made to the `beam_search()` function to make it TorchScript-compatible.

## How to run the example

To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
pip install '.[onnxruntime]'
```

Then cd into this example folder and run

```bash
pip install -r requirements.txt
```

Now you can run the example command below to get the example ONNX file:

```bash
python run_onnx_exporter.py --model_name_or_path facebook/bart-base
```
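Once the export succeeds, the resulting ONNX file can be sanity-checked with `onnxruntime`. The sketch below is hedged: the output file name and the graph's input names are assumptions, so list them with `sess.get_inputs()` first and adjust accordingly:

```python
import onnxruntime as ort
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
sess = ort.InferenceSession("bart_beam_search.onnx")  # hypothetical output file name
print([inp.name for inp in sess.get_inputs()])  # verify the actual input names

inputs = tokenizer("HuggingFace is a company based in New York.", return_tensors="np")
# feed the graph; additional beam-search inputs (e.g. max_length, num_beams) may
# be expected depending on how the wrapper model was exported
outputs = sess.run(None, {"input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"]})
```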
transformers/examples/research_projects/onnx/summarization/README.md/0
{ "file_path": "transformers/examples/research_projects/onnx/summarization/README.md", "repo_id": "transformers", "token_count": 463 }
71
#! /usr/bin/env python3 # coding=utf-8 # Copyright (c) 2019 Uber Technologies, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Example command with bag of words: python run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95 Example command with discriminator: python run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95 """ import argparse import json from operator import add from typing import List, Optional, Tuple, Union import numpy as np import torch from pplm_classification_head import ClassificationHead from torch import nn from tqdm import trange from transformers import GPT2LMHeadModel, GPT2Tokenizer from transformers.file_utils import cached_path PPLM_BOW = 1 PPLM_DISCRIM = 2 PPLM_BOW_DISCRIM = 3 SMALL_CONST = 1e-15 BIG_CONST = 1e10 BAG_OF_WORDS_ARCHIVE_MAP = { "legal": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt", "military": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt", "politics": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt", "religion": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt", "science": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt", "space": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt", "technology": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt", } DISCRIMINATOR_MODELS_PARAMS = { "clickbait": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt", "class_size": 2, "embed_size": 1024, "class_vocab": {"non_clickbait": 0, "clickbait": 1}, "default_class": 1, "pretrained_model": "openai-community/gpt2-medium", }, "sentiment": { "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt", "class_size": 5, "embed_size": 1024, "class_vocab": {"very_positive": 2, "very_negative": 3}, "default_class": 3, "pretrained_model": "openai-community/gpt2-medium", }, } def top_k_filter(logits, k, probs=False): """ Masks everything but the k top entries as -infinity (1e10). Used to mask logits such that e^-infinity -> 0 won't contribute to the sum of the denominator. 
""" if k == 0: return logits else: values = torch.topk(logits, k)[0] batch_mins = values[:, -1].view(-1, 1).expand_as(logits) if probs: return torch.where(logits < batch_mins, torch.ones_like(logits) * 0.0, logits) return torch.where(logits < batch_mins, torch.ones_like(logits) * -BIG_CONST, logits) def perturb_past( past, model, last, unpert_past=None, unpert_logits=None, accumulated_hidden=None, grad_norms=None, stepsize=0.01, one_hot_bows_vectors=None, classifier=None, class_label=None, loss_type=0, num_iterations=3, horizon_length=1, window_length=0, decay=False, gamma=1.5, kl_scale=0.01, device="cuda", ): # Generate inital perturbed past grad_accumulator = [(np.zeros(p.shape).astype("float32")) for p in past] if accumulated_hidden is None: accumulated_hidden = 0 if decay: decay_mask = torch.arange(0.0, 1.0 + SMALL_CONST, 1.0 / (window_length))[1:] else: decay_mask = 1.0 # TODO fix this comment (SUMANTH) # Generate a mask is gradient perturbated is based on a past window _, _, _, curr_length, _ = past[0].shape if curr_length > window_length and window_length > 0: ones_key_val_shape = tuple(past[0].shape[:-2]) + (window_length,) + tuple(past[0].shape[-1:]) zeros_key_val_shape = tuple(past[0].shape[:-2]) + (curr_length - window_length,) + tuple(past[0].shape[-1:]) ones_mask = torch.ones(ones_key_val_shape) ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3) ones_mask = ones_mask.permute(0, 1, 2, 4, 3) window_mask = torch.cat((ones_mask, torch.zeros(zeros_key_val_shape)), dim=-2).to(device) else: window_mask = torch.ones_like(past[0]).to(device) # accumulate perturbations for num_iterations loss_per_iter = [] new_accumulated_hidden = None for i in range(num_iterations): print("Iteration ", i + 1) curr_perturbation = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] # make sure p_.grad is not None for p_ in curr_perturbation: p_.retain_grad() # Compute hidden using perturbed past perturbed_past = list(map(add, past, curr_perturbation)) _, _, _, curr_length, _ = curr_perturbation[0].shape lm_output = model(last, past_key_values=perturbed_past) all_logits, all_hidden = lm_output["logits"], lm_output["hidden_states"] hidden = all_hidden[-1] new_accumulated_hidden = accumulated_hidden + torch.sum(hidden, dim=1).detach() # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth) logits = all_logits[:, -1, :] probs = nn.functional.softmax(logits, dim=-1) loss = 0.0 loss_list = [] if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM: for one_hot_bow in one_hot_bows_vectors: bow_logits = torch.mm(probs, torch.t(one_hot_bow)) bow_loss = -torch.log(torch.sum(bow_logits)) loss += bow_loss loss_list.append(bow_loss) print(" pplm_bow_loss:", loss.data.cpu().numpy()) if loss_type == 2 or loss_type == 3: ce_loss = nn.CrossEntropyLoss() # TODO why we need to do this assignment and not just using unpert_past? 
(Sumanth) curr_unpert_past = unpert_past curr_probs = torch.unsqueeze(probs, dim=1) wte = model.resize_token_embeddings() for _ in range(horizon_length): inputs_embeds = torch.matmul(curr_probs, wte.weight.data) lm_output = model(past_key_values=curr_unpert_past, inputs_embeds=inputs_embeds) curr_all_logits, curr_unpert_past, curr_all_hidden = ( lm_output["logits"], lm_output["past_key_values"], lm_output["hidden_states"], ) curr_logits = curr_all_logits[:, -1, :] curr_probs = nn.functional.softmax(curr_logits, dim=-1) curr_probs = torch.unsqueeze(curr_probs, dim=1) curr_hidden = curr_all_hidden[-1] new_accumulated_hidden = new_accumulated_hidden + torch.sum(curr_hidden, dim=1) prediction = classifier(new_accumulated_hidden / (curr_length + 1 + horizon_length)) label = torch.tensor(prediction.shape[0] * [class_label], device=device, dtype=torch.long) discrim_loss = ce_loss(prediction, label) print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy()) loss += discrim_loss loss_list.append(discrim_loss) kl_loss = 0.0 if kl_scale > 0.0: unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1) unpert_probs = unpert_probs + SMALL_CONST * (unpert_probs <= SMALL_CONST).float().to(device).detach() correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(device).detach() corrected_probs = probs + correction.detach() kl_loss = kl_scale * ((corrected_probs * (corrected_probs / unpert_probs).log()).sum()) print(" kl_loss", kl_loss.data.cpu().numpy()) loss += kl_loss loss_per_iter.append(loss.data.cpu().numpy()) print(" pplm_loss", (loss - kl_loss).data.cpu().numpy()) # compute gradients loss.backward() # calculate gradient norms if grad_norms is not None and loss_type == PPLM_BOW: grad_norms = [ torch.max(grad_norms[index], torch.norm(p_.grad * window_mask)) for index, p_ in enumerate(curr_perturbation) ] else: grad_norms = [ (torch.norm(p_.grad * window_mask) + SMALL_CONST) for index, p_ in enumerate(curr_perturbation) ] # normalize gradients grad = [ -stepsize * (p_.grad * window_mask / grad_norms[index] ** gamma).data.cpu().numpy() for index, p_ in enumerate(curr_perturbation) ] # accumulate gradient grad_accumulator = list(map(add, grad, grad_accumulator)) # reset gradients, just to make sure for p_ in curr_perturbation: p_.grad.data.zero_() # removing past from the graph new_past = [] for p_ in past: new_past.append(p_.detach()) past = new_past # apply the accumulated perturbations to the past grad_accumulator = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator] pert_past = list(map(add, past, grad_accumulator)) return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter def get_classifier( name: Optional[str], class_label: Union[str, int], device: str ) -> Tuple[Optional[ClassificationHead], Optional[int]]: if name is None: return None, None params = DISCRIMINATOR_MODELS_PARAMS[name] classifier = ClassificationHead(class_size=params["class_size"], embed_size=params["embed_size"]).to(device) if "url" in params: resolved_archive_file = cached_path(params["url"]) elif "path" in params: resolved_archive_file = params["path"] else: raise ValueError("Either url or path have to be specified in the discriminator model parameters") classifier.load_state_dict(torch.load(resolved_archive_file, map_location=device)) classifier.eval() if isinstance(class_label, str): if class_label in params["class_vocab"]: label_id = params["class_vocab"][class_label] else: label_id = params["default_class"] print("class_label {} not in 
class_vocab".format(class_label)) print("available values are: {}".format(params["class_vocab"])) print("using default class {}".format(label_id)) elif isinstance(class_label, int): if class_label in set(params["class_vocab"].values()): label_id = class_label else: label_id = params["default_class"] print("class_label {} not in class_vocab".format(class_label)) print("available values are: {}".format(params["class_vocab"])) print("using default class {}".format(label_id)) else: label_id = params["default_class"] return classifier, label_id def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> List[List[List[int]]]: bow_indices = [] for id_or_path in bag_of_words_ids_or_paths: if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP: filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path]) else: filepath = id_or_path with open(filepath, "r") as f: words = f.read().strip().split("\n") bow_indices.append([tokenizer.encode(word.strip(), add_prefix_space=True) for word in words]) return bow_indices def build_bows_one_hot_vectors(bow_indices, tokenizer, device="cuda"): if bow_indices is None: return None one_hot_bows_vectors = [] for single_bow in bow_indices: single_bow = list(filter(lambda x: len(x) <= 1, single_bow)) single_bow = torch.tensor(single_bow).to(device) num_words = single_bow.shape[0] one_hot_bow = torch.zeros(num_words, tokenizer.vocab_size).to(device) one_hot_bow.scatter_(1, single_bow, 1) one_hot_bows_vectors.append(one_hot_bow) return one_hot_bows_vectors def full_text_generation( model, tokenizer, context=None, num_samples=1, device="cuda", bag_of_words=None, discrim=None, class_label=None, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, repetition_penalty=1.0, **kwargs, ): classifier, class_id = get_classifier(discrim, class_label, device) bow_indices = [] if bag_of_words: bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer) if bag_of_words and classifier: print("Both PPLM-BoW and PPLM-Discrim are on. 
This is not optimized.") loss_type = PPLM_BOW_DISCRIM elif bag_of_words: loss_type = PPLM_BOW print("Using PPLM-BoW") elif classifier is not None: loss_type = PPLM_DISCRIM print("Using PPLM-Discrim") else: raise Exception("Specify either a bag of words or a discriminator") unpert_gen_tok_text, _, _ = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, length=length, sample=sample, perturb=False, repetition_penalty=repetition_penalty, ) if device == "cuda": torch.cuda.empty_cache() pert_gen_tok_texts = [] discrim_losses = [] losses_in_time = [] for i in range(num_samples): pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm( model=model, tokenizer=tokenizer, context=context, device=device, perturb=True, bow_indices=bow_indices, classifier=classifier, class_label=class_id, loss_type=loss_type, length=length, stepsize=stepsize, temperature=temperature, top_k=top_k, sample=sample, num_iterations=num_iterations, grad_length=grad_length, horizon_length=horizon_length, window_length=window_length, decay=decay, gamma=gamma, gm_scale=gm_scale, kl_scale=kl_scale, repetition_penalty=repetition_penalty, ) pert_gen_tok_texts.append(pert_gen_tok_text) if classifier is not None: discrim_losses.append(discrim_loss.data.cpu().numpy()) losses_in_time.append(loss_in_time) if device == "cuda": torch.cuda.empty_cache() return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time def generate_text_pplm( model, tokenizer, context=None, past=None, device="cuda", perturb=True, bow_indices=None, classifier=None, class_label=None, loss_type=0, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, repetition_penalty=1.0, ): output_so_far = None if context: context_t = torch.tensor(context, device=device, dtype=torch.long) while len(context_t.shape) < 2: context_t = context_t.unsqueeze(0) output_so_far = context_t # collect one hot vectors for bags of words one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, device) grad_norms = None last = None unpert_discrim_loss = 0 loss_in_time = [] for i in trange(length, ascii=True): # Get past/probs for current output, except for last word # Note that GPT takes 2 inputs: past + current_token # run model forward to obtain unperturbed if past is None and output_so_far is not None: last = output_so_far[:, -1:] if output_so_far.shape[1] > 1: past = model(output_so_far[:, :-1])["past_key_values"] lm_output = model(output_so_far) unpert_logits, unpert_past, unpert_all_hidden = ( lm_output["logits"], lm_output["past_key_values"], lm_output["hidden_states"], ) unpert_last_hidden = unpert_all_hidden[-1] # check if we are abowe grad max length if i >= grad_length: current_stepsize = stepsize * 0 else: current_stepsize = stepsize # modify the past if necessary if not perturb or num_iterations == 0: pert_past = past else: accumulated_hidden = unpert_last_hidden[:, :-1, :] accumulated_hidden = torch.sum(accumulated_hidden, dim=1) if past is not None: pert_past, _, grad_norms, loss_this_iter = perturb_past( past, model, last, unpert_past=unpert_past, unpert_logits=unpert_logits, accumulated_hidden=accumulated_hidden, grad_norms=grad_norms, stepsize=current_stepsize, one_hot_bows_vectors=one_hot_bows_vectors, classifier=classifier, class_label=class_label, loss_type=loss_type, num_iterations=num_iterations, horizon_length=horizon_length, window_length=window_length, 
decay=decay, gamma=gamma, kl_scale=kl_scale, device=device, ) loss_in_time.append(loss_this_iter) else: pert_past = past lm_output = model(last, past_key_values=pert_past) pert_logits, past = ( lm_output["logits"], lm_output["past_key_values"], ) pert_logits = pert_logits[:, -1, :] / temperature # + SMALL_CONST for token_idx in set(output_so_far[0].tolist()): if pert_logits[0, token_idx] < 0: pert_logits[0, token_idx] *= repetition_penalty else: pert_logits[0, token_idx] /= repetition_penalty pert_probs = nn.functional.softmax(pert_logits, dim=-1) if classifier is not None: ce_loss = nn.CrossEntropyLoss() prediction = classifier(torch.mean(unpert_last_hidden, dim=1)) label = torch.tensor([class_label], device=device, dtype=torch.long) unpert_discrim_loss = ce_loss(prediction, label) print("unperturbed discrim loss", unpert_discrim_loss.data.cpu().numpy()) else: unpert_discrim_loss = 0 # Fuse the modified model and original model if perturb: unpert_probs = nn.functional.softmax(unpert_logits[:, -1, :], dim=-1) pert_probs = (pert_probs**gm_scale) * (unpert_probs ** (1 - gm_scale)) # + SMALL_CONST pert_probs = top_k_filter(pert_probs, k=top_k, probs=True) # + SMALL_CONST # rescale if torch.sum(pert_probs) <= 1: pert_probs = pert_probs / torch.sum(pert_probs) else: pert_logits = top_k_filter(pert_logits, k=top_k) # + SMALL_CONST pert_probs = nn.functional.softmax(pert_logits, dim=-1) # sample or greedy if sample: last = torch.multinomial(pert_probs, num_samples=1) else: _, last = torch.topk(pert_probs, k=1, dim=-1) # update context/output_so_far appending the new token output_so_far = last if output_so_far is None else torch.cat((output_so_far, last), dim=1) print(tokenizer.decode(output_so_far.tolist()[0])) return output_so_far, unpert_discrim_loss, loss_in_time def set_generic_model_params(discrim_weights, discrim_meta): if discrim_weights is None: raise ValueError("When using a generic discriminator, discrim_weights need to be specified") if discrim_meta is None: raise ValueError("When using a generic discriminator, discrim_meta need to be specified") with open(discrim_meta, "r") as discrim_meta_file: meta = json.load(discrim_meta_file) meta["path"] = discrim_weights DISCRIMINATOR_MODELS_PARAMS["generic"] = meta def run_pplm_example( pretrained_model="openai-community/gpt2-medium", cond_text="", uncond=False, num_samples=1, bag_of_words=None, discrim=None, discrim_weights=None, discrim_meta=None, class_label=-1, length=100, stepsize=0.02, temperature=1.0, top_k=10, sample=False, num_iterations=3, grad_length=10000, horizon_length=1, window_length=0, decay=False, gamma=1.5, gm_scale=0.9, kl_scale=0.01, seed=0, no_cuda=False, colorama=False, repetition_penalty=1.0, ): # set Random seed torch.manual_seed(seed) np.random.seed(seed) # set the device device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu" if discrim == "generic": set_generic_model_params(discrim_weights, discrim_meta) if discrim is not None: pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim]["pretrained_model"] print("discrim = {}, pretrained_model set to discriminator's = {}".format(discrim, pretrained_model)) # load pretrained model model = GPT2LMHeadModel.from_pretrained(pretrained_model, output_hidden_states=True) model.to(device) model.eval() # load tokenizer tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model) # Freeze GPT-2 weights for param in model.parameters(): param.requires_grad = False # figure out conditioning text if uncond: tokenized_cond_text = tokenizer.encode([tokenizer.bos_token]) 
    else:
        raw_text = cond_text
        while not raw_text:
            print("Did you forget to add `--cond_text`? ")
            raw_text = input("Model prompt >>> ")
        tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text)

    print("= Prefix of sentence =")
    print(tokenizer.decode(tokenized_cond_text))
    print()

    # generate unperturbed and perturbed texts

    # full_text_generation returns:
    # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
    unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation(
        model=model,
        tokenizer=tokenizer,
        context=tokenized_cond_text,
        device=device,
        num_samples=num_samples,
        bag_of_words=bag_of_words,
        discrim=discrim,
        class_label=class_label,
        length=length,
        stepsize=stepsize,
        temperature=temperature,
        top_k=top_k,
        sample=sample,
        num_iterations=num_iterations,
        grad_length=grad_length,
        horizon_length=horizon_length,
        window_length=window_length,
        decay=decay,
        gamma=gamma,
        gm_scale=gm_scale,
        kl_scale=kl_scale,
        repetition_penalty=repetition_penalty,
    )

    # untokenize unperturbed text
    unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0])

    print("=" * 80)
    print("= Unperturbed generated text =")
    print(unpert_gen_text)
    print()

    generated_texts = []

    bow_word_ids = set()
    if bag_of_words and colorama:
        bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer)
        for single_bow_list in bow_indices:
            # filtering all words in the list composed of more than 1 token
            filtered = list(filter(lambda x: len(x) <= 1, single_bow_list))
            # w[0] because we are sure w has only 1 item because of the previous filter
            bow_word_ids.update(w[0] for w in filtered)

    # iterate through the perturbed texts
    for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts):
        try:
            # untokenize perturbed text
            if colorama:
                import colorama

                pert_gen_text = ""
                for word_id in pert_gen_tok_text.tolist()[0]:
                    if word_id in bow_word_ids:
                        pert_gen_text += "{}{}{}".format(
                            colorama.Fore.RED,
                            tokenizer.decode([word_id]),
                            colorama.Style.RESET_ALL,
                        )
                    else:
                        pert_gen_text += tokenizer.decode([word_id])
            else:
                pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0])

            print("= Perturbed generated text {} =".format(i + 1))
            print(pert_gen_text)
            print()
        except Exception as exc:
            print("Ignoring error while generating perturbed text:", exc)

        # keep the prefix, perturbed seq, original seq for each index
        generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text))

    return


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pretrained_model",
        "-M",
        type=str,
        default="openai-community/gpt2-medium",
        help="pretrained model name or path to local checkpoint",
    )
    parser.add_argument("--cond_text", type=str, default="The lake", help="Prefix texts to condition on")
    parser.add_argument("--uncond", action="store_true", help="Generate from end-of-text as prefix")
    parser.add_argument(
        "--num_samples",
        type=int,
        default=1,
        help="Number of samples to generate from the modified latents",
    )
    parser.add_argument(
        "--bag_of_words",
        "-B",
        type=str,
        default=None,
        help=(
            "Bags of words used for PPLM-BoW. "
            "Either a BOW id (see list in code) or a filepath. "
            "Multiple BoWs separated by ;"
        ),
    )
    parser.add_argument(
        "--discrim",
        "-D",
        type=str,
        default=None,
        choices=("clickbait", "sentiment", "generic"),
        help="Discriminator to use",
    )
    parser.add_argument(
        "--discrim_weights",
        type=str,
        default=None,
        help="Weights for the generic discriminator",
    )
    parser.add_argument(
        "--discrim_meta",
        type=str,
        default=None,
        help="Meta information for the generic discriminator",
    )
    parser.add_argument(
        "--class_label",
        type=int,
        default=-1,
        help="Class label used for the discriminator",
    )
    parser.add_argument("--length", type=int, default=100)
    parser.add_argument("--stepsize", type=float, default=0.02)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=10)
    parser.add_argument("--sample", action="store_true", help="Sample from the distribution instead of greedy decoding")
    parser.add_argument("--num_iterations", type=int, default=3)
    parser.add_argument("--grad_length", type=int, default=10000)
    parser.add_argument(
        "--window_length",
        type=int,
        default=0,
        help="Length of past which is being optimized; 0 corresponds to infinite window length",
    )
    parser.add_argument(
        "--horizon_length",
        type=int,
        default=1,
        help="Length of future to optimize over",
    )
    parser.add_argument("--decay", action="store_true", help="whether to decay or not")
    parser.add_argument("--gamma", type=float, default=1.5)
    parser.add_argument("--gm_scale", type=float, default=0.9)
    parser.add_argument("--kl_scale", type=float, default=0.01)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--no_cuda", action="store_true", help="no cuda")
    parser.add_argument("--colorama", action="store_true", help="colors keywords")
    parser.add_argument(
        "--repetition_penalty",
        type=float,
        default=1.0,
        help="Penalize repetition. More than 1.0 -> less repetition",
    )

    args = parser.parse_args()
    run_pplm_example(**vars(args))
transformers/examples/research_projects/pplm/run_pplm.py/0
{ "file_path": "transformers/examples/research_projects/pplm/run_pplm.py", "repo_id": "transformers", "token_count": 13443 }
72
import os
from functools import partial
from glob import glob

import faiss
from datasets import Features, Sequence, Value, concatenate_datasets, load_dataset, load_from_disk

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast


def split_text(text, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents):
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed_update(ctx_encoder, total_processes, device, process_num, shard_dir, csv_path):
    kb_dataset = load_dataset(
        "csv", data_files=[csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    kb_dataset = kb_dataset.map(
        split_documents, batched=True, num_proc=1
    )  # if you want, you can load an already split CSV.

    kb_list = [kb_dataset.shard(total_processes, i, contiguous=True) for i in range(total_processes)]
    data_shard = kb_list[process_num]

    arrow_folder = "data_" + str(process_num)
    passages_path = os.path.join(shard_dir, arrow_folder)

    context_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-multiset-base")
    ctx_encoder = ctx_encoder.to(device=device)

    def embed(
        documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast, device
    ) -> dict:
        """Compute the DPR embeddings of document passages"""
        input_ids = ctx_tokenizer(
            documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
        )["input_ids"]
        embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
        return {"embeddings": embeddings.detach().cpu().numpy()}

    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space

    dataset = data_shard.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=context_tokenizer, device=device),
        batched=True,
        batch_size=16,
        features=new_features,
    )

    dataset.save_to_disk(passages_path)


def add_index(shard_dir, index_path):
    data_shard_list = []

    for shard_address in glob(str(shard_dir) + "/*/"):
        data_shard_list.append(load_from_disk(shard_address))

    concat = concatenate_datasets(data_shard_list)
    faiss.omp_set_num_threads(96)

    index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT)
    concat.add_faiss_index("embeddings", custom_index=index)
    concat.get_index("embeddings").save(
        index_path
    )  # since we load the index into memory, we can directly update the index on disk
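# Hypothetical usage sketch (not part of the original utilities; file names and
# devices below are placeholders): embed a knowledge base in two shards, then
# build a single FAISS index over all of them.
#
#   ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-multiset-base")
#   embed_update(ctx_encoder, total_processes=2, device="cuda:0", process_num=0,
#                shard_dir="shards", csv_path="my_knowledge_base.csv")
#   embed_update(ctx_encoder, total_processes=2, device="cuda:1", process_num=1,
#                shard_dir="shards", csv_path="my_knowledge_base.csv")
#   add_index(shard_dir="shards", index_path="my_kb_index.faiss")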
transformers/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py/0
{ "file_path": "transformers/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py", "repo_id": "transformers", "token_count": 1231 }
73
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
                --data_dir {data_dir} \
                --output_dir {output_dir} \
                --model_name_or_path facebook/rag-sequence-base \
                --model_type rag_sequence \
                --do_train \
                --do_predict \
                --n_val -1 \
                --val_check_interval 1.0 \
                --train_batch_size 2 \
                --eval_batch_size 1 \
                --max_source_length 25 \
                --max_target_length 25 \
                --val_max_target_length 25 \
                --test_max_target_length 25 \
                --label_smoothing 0.1 \
                --dropout 0.1 \
                --attention_dropout 0.1 \
                --weight_decay 0.001 \
                --adam_epsilon 1e-08 \
                --max_grad_norm 0.1 \
                --lr_scheduler polynomial \
                --learning_rate 3e-04 \
                --num_train_epochs 1 \
                --warmup_steps 4 \
                --gradient_accumulation_steps 1 \
                --distributed-port 8787 \
                --use_dummy_dataset 1 \
                --distributed_retriever {distributed_retriever} \
            """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        # use both GPUs, matching the @require_torch_multi_gpu decorator
        result = self._run_finetune(gpus=2, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
transformers/examples/research_projects/rag/_test_finetune_rag.py/0
{ "file_path": "transformers/examples/research_projects/rag/_test_finetune_rag.py", "repo_id": "transformers", "token_count": 1994 }
74
# Robust Speech Challenge 🤗

Welcome to the robust speech recognition challenge 🎙️ !

The goal of this event is to build **robust**, **real-world** speech recognition (ASR) systems in as many languages as possible 🌏🌍🌎.
If necessary and available, free access to a V100S 32 GB GPU will kindly be provided by the [OVHcloud team](https://www.ovhcloud.com/) 🚀.

This document summarizes all the relevant information required for the speech community event 📋.

To sign-up, please see [this forum post](https://discuss.huggingface.co/t/open-to-the-community-robust-speech-recognition-challenge/13614) 🤗. Please make sure to:
- Read it in detail
- Fill the google form
- Join our Discord server in the #join-sprint channel.

## Table of Contents

- [TLDR;](#tldr)
- [Important dates](#important-dates)
- [How to install pytorch, transformers, datasets](#how-to-install-relevant-libraries)
- [Data and Preprocessing](#data-and-preprocessing)
- [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model)
- [How to fine-tune with OVHcloud](#how-to-finetune-with-ovh-cloud)
- [How to combine n-gram language models with acoustic model](#how-to-combine-n-gram-with-acoustic-model)
- [Evaluation](#evaluation)
- [Prizes](#prizes)
- [Communication and Problems](#communication-and-problems)
- [Talks](#talks)
- [General Tips & Tricks](#general-tips-and-tricks)

## TLDR

Participants are encouraged to leverage pre-trained speech recognition checkpoints, preferably [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53), to train a speech recognition system in a language of their choice.

Speech recognition systems should be trained using **PyTorch**, **🤗 Transformers**, and **🤗 Datasets**. For more information on how to install the above libraries, please read through [How to install pytorch, transformers, datasets](#how-to-install-relevant-libraries).

Participants can make use of whatever data they think is useful to build a speech recognition system for **real-world** audio data - **except** the Common Voice `"test"` split of their chosen language. The section [Data and preprocessing](#data-and-preprocessing) explains in more detail what audio data can be used, how to find suitable audio data, and how the audio data can be processed.

For training, it is recommended to use the [official training script](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py) or a modification thereof. A step-by-step guide on how to fine-tune an acoustic model for a speech recognition system can be found under [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model).

If possible, it is encouraged to fine-tune the acoustic models on local GPU machines, but if those are not available, the OVHcloud team kindly provides a limited number of GPUs for the event. Simply fill out [this google form](https://forms.gle/GFZkMkKLiufi75g28) to get access to a GPU. For more information on how to train an acoustic model on one of OVH's GPUs - see [How to fine-tune a speech recognition model with OVHcloud](#how-to-finetune-with-ovh-cloud).

The performance of a speech recognition system can often be significantly improved by adding a language model for decoding. For more information on how to add a language model, please take a look at [How to combine n-gram language models with speech recognition models](#how-to-combine-n-gram-with-acoustic-model).
During the event, the speech recognition system will be evaluated on both the Common Voice `"test"` split of the participants' chosen language as well as the *real-world* `"dev"` data provided by the Hugging Face team. At the end of the robust speech recognition challenge, the speech recognition system will also be evaluated on the *real-world* `"test"` data provided by the Hugging Face team. Each participant should add an `eval.py` script to her/his model repository in a specific format that lets one easily evaluate the speech recognition system on both Common Voice's `"test"` data as well as the *real-world* audio data. Please read through the [Evaluation](#evaluation) section to make sure your evaluation script is in the correct format. Speech recognition systems with evaluation scripts in an incorrect format can sadly not be considered for the Challenge.

At the end of the event, the best performing speech recognition system will receive a prize 🏆 - more information regarding the prizes can be found under [Prizes](#prizes).

We believe that framing the event as a competition is more fun, but at the core, the event is about creating speech recognition systems in as many languages as possible as a community. This can be achieved by working together, helping each other to solve bugs, sharing important findings, etc...🤗

**Note**: Please read through the section on [Communication & Problems](#communication-and-problems) to make sure you know how to ask for help, etc...
All important announcements will be made on discord. Please make sure that you've joined [this discord channel](https://discord.gg/SHr5wC7m)

Also, please make sure that you have been added to the [Speech Event Organization](https://huggingface.co/speech-recognition-community-v2).
You should have received an invite by email. If you didn't receive an invite, please contact the organizers, *e.g.* Anton, Patrick, or Omar directly on discord.

## Important dates

![timeline](https://github.com/patrickvonplaten/scientific_images/raw/master/Robush%20Speech%20Challenge.png)

## Data and preprocessing

In this section, we will quickly go over how to find suitable training data and how to preprocess it.

To begin with, **all data except Common Voice's `"test"` data can be used as training data.** The exception includes all Common Voice versions, as the test data split of later Common Voice versions often overlaps with that of previous versions. *E.g.*, the test data of Common Voice 7 in English is to a large part identical to the test data of Common Voice 6 in English:

```python
load_dataset("mozilla-foundation/common_voice_7_0", "en", split="test")
```

includes more or less the same data as

```python
load_dataset("mozilla-foundation/common_voice_6_1", "en", split="test")
```

However, we strongly encourage participants to make use of Common Voice's other splits, *e.g.* `"train"` and `"validation"`. For most languages, the Common Voice dataset already offers a decent amount of training data. It is almost always advantageous to collect additional data. To do so, participants are encouraged, as a first step, to search the Hugging Face Hub for additional audio data, for example by selecting the category ["speech-processing"](https://huggingface.co/datasets?task_categories=task_categories:speech-processing&sort=downloads). All datasets that are available on the Hub can be downloaded via the 🤗 Datasets library in the same way Common Voice is downloaded.
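If one wants to combine multiple datasets for training, it might make sense to take a look at the [`interleave_datasets`](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=interleave#datasets.interleave_datasets) function. As a minimal sketch - the second dataset name and its `text` column below are placeholders, not a recommendation - mixing Common Voice with another Hub dataset could look like:

```python
from datasets import interleave_datasets, load_dataset

common_voice = load_dataset(
    "mozilla-foundation/common_voice_7_0", "sv-SE", split="train", use_auth_token=True
)
# "some_org/some_asr_dataset" is a placeholder for any additional Hub dataset
other = load_dataset("some_org/some_asr_dataset", split="train")

# both datasets must share the same column names and features before mixing,
# e.g. rename the transcription column to match Common Voice's "sentence"
other = other.rename_column("text", "sentence")

train_dataset = interleave_datasets([common_voice, other])
```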
In addition, participants can also make use of their own audio data. Here, please make sure that you **are allowed to use the audio data**. E.g., if audio data is taken from media platforms, such as YouTube, it should be verified that the media platform and the owner of the data have given her/his approval to use the audio data in the context of machine learning research. If you are not sure whether the data you want to use has the appropriate licensing, please contact the Hugging Face team on discord.

Next, let's talk about preprocessing. Audio data and transcriptions have to be brought into the correct format when training the acoustic model (an example is shown in [How to fine-tune an acoustic model](#how-to-finetune-an-acoustic-model)). It is recommended that this is done by using 🤗 Datasets' `.map()` function as shown [here](https://github.com/huggingface/transformers/blob/9a2dabae7002258e41419491c73dd43ad61b5de7/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L444). As can be seen, we can pass some characters that will be removed from the transcriptions, *e.g.*: `--chars_to_ignore , ? . ! - \; \: \" “ % ‘ ” � \` on the official ["Single GPU Example"](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition#single-gpu-ctc). The participants are free to modify this preprocessing by removing more characters or even replacing characters as it is done in the [official blog post](https://github.com/huggingface/transformers/blob/9a2dabae7002258e41419491c73dd43ad61b5de7/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py#L444). **However**, there are some rules regarding what characters are allowed to be removed/replaced and which are not. These rules are not that straightforward and therefore often have to be evaluated case-by-case.

It is allowed (and recommended) to normalize the data to only have lower-case characters. It is also allowed (and recommended) to remove typographical symbols and punctuation marks. A list of such symbols can *e.g.* be found [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks) - however, here we already must be careful. We should **not** remove a symbol that would change the meaning of the words, *e.g.* in English, we should not remove the single quotation mark `'` since it would change the meaning of the word `"it's"` to `"its"`, which would then be incorrect. So the golden rule here is to not remove any characters that could change the meaning of a word into another word. This is not always obvious and should be given some consideration. As another example, it is fine to remove the "Hyphen-minus" sign "`-`" since it doesn't change the meaning of a word to another one. *E.g.* "`fine-tuning`" would be changed to "`finetuning`", which still has the same meaning.

Since those choices are not always obvious, when in doubt feel free to ask on Discord or, even better, post your question on the forum, as was done, *e.g.*, [here](https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586).

## How to install relevant libraries

The following libraries are required to fine-tune a speech model with 🤗 Transformers and 🤗 Datasets in PyTorch.
- [PyTorch](https://pytorch.org/)
- [Transformers](https://github.com/huggingface/transformers)
- [Datasets](https://github.com/huggingface/datasets)

We recommend installing the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going to use and activate it.

You should be able to run the command:

```bash
python3 -m venv <your-venv-name>
```

You can activate your venv by running

```bash
source ~/<your-venv-name>/bin/activate
```

To begin with, please make sure you have PyTorch and CUDA correctly installed. The following command should return ``True``:

```bash
python -c "import torch; print(torch.cuda.is_available())"
```

If the above command doesn't print ``True``, in the first step, please follow the instructions [here](https://pytorch.org/) to install PyTorch with CUDA.

We strongly recommend making use of the provided PyTorch example scripts in [transformers/examples/pytorch/speech-recognition](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition) to train your speech recognition system. In all likelihood, you will adjust one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows.

1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote:

    ```bash
    $ git clone https://github.com/<your Github handle>/transformers.git
    $ cd transformers
    $ git remote add upstream https://github.com/huggingface/transformers.git
    ```

3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team:

    ```bash
    $ git checkout -b a-descriptive-name-for-my-project
    ```

4. Set up a PyTorch environment by running the following command in your virtual environment:

    ```bash
    $ pip install -e ".[torch-speech]"
    ```

    (If transformers was already installed in the virtual environment, remove it with `pip uninstall transformers` before reinstalling it in editable mode with the `-e` flag.)

    If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `transformers` library.

    Running this command will automatically install `torch` and the most relevant libraries required for fine-tuning a speech recognition system.

Next, you should also install the 🤗 Datasets library. We strongly recommend installing the library from source to profit from the most current additions during the community week. Simply run the following steps:

```bash
$ cd ~/
$ git clone https://github.com/huggingface/datasets.git
$ cd datasets
$ pip install -e ".[streaming]"
```

If you plan on contributing a specific dataset during the community week, please fork the datasets repository and follow the instructions [here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request).

To verify that all libraries are correctly installed, you can run the following command in a Python shell. It verifies that both `transformers` and `datasets` have been correctly installed.
```python
from transformers import AutoModelForCTC, AutoProcessor
from datasets import load_dataset

dummy_dataset = load_dataset("common_voice", "ab", split="test")

model = AutoModelForCTC.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
model.to("cuda")

processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

input_values = processor(dummy_dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=16_000).input_values
input_values = input_values.to("cuda")

logits = model(input_values).logits

assert logits.shape[-1] == 32
```

## How to finetune an acoustic model

In this section, we show you how to fine-tune a pre-trained [XLS-R Model](https://huggingface.co/docs/transformers/model_doc/xls_r) on the [Common Voice 7 dataset](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).

We recommend fine-tuning one of the following pre-trained XLS-R checkpoints:

- [300M parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-300m)
- [1B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-1b)
- [2B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-2b)

To begin with, please note that to use the Common Voice dataset, you have to accept that **your email address** and **username** are shared with the mozilla-foundation. To get access to the dataset please click on "*Access repository*" [here](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0).

Next, we recommend that you get familiar with the XLS-R model and its capabilities. In collaboration with [Fairseq's Wav2Vec2 team](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec), we've written ["Fine-tuning XLS-R for Multi-Lingual ASR with 🤗 Transformers"](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2), which gives an in-detail explanation of how XLS-R functions and how it can be fine-tuned. The blog post can also be opened in a Google Colab notebook, where the model can be fine-tuned directly. In this section, we will explain how to fine-tune the model on a local machine.

1. **Log in**

To begin with, you should check that you are correctly logged in and that you have `git-lfs` installed so that your fine-tuned model can automatically be uploaded.

Run:

```bash
huggingface-cli login
```

to login. It is recommended to login with your access token, which can be found under your Hugging Face profile (icon in the top right corner on [hf.co](http://hf.co/), then Settings -> Access Tokens -> User Access Tokens -> New Token, if you haven't generated one already). You can then copy-paste this token to log in locally.

2. **Create your model repository**

First, let's make sure that `git-lfs` is correctly installed. To do so, simply run:

```bash
git-lfs -v
```

The output should show something like `git-lfs/2.13.2 (GitHub; linux amd64; go 1.15.4)`. If your console states that the `git-lfs` command was not found, please make sure to install it [here](https://git-lfs.github.com/) or simply via:

```bash
sudo apt-get install git-lfs
```

Now you can create your model repository, which will contain all relevant files to reproduce your training. You can either directly create the model repository on the Hub (Settings -> New Model) or via the CLI. Here we choose to use the CLI instead.

Assuming that we want to call our model repository *xls-r-ab-test*, we can run the following command:

```bash
huggingface-cli repo create xls-r-ab-test
```

You can now see the model on the Hub, *e.g.* under https://huggingface.co/hf-test/xls-r-ab-test .
Let's clone the repository so that we can define our training script inside.

```bash
git lfs install
git clone https://huggingface.co/hf-test/xls-r-ab-test
```

3. **Add your training script and `run`-command to the repository**

We encourage participants to add all relevant files for training directly to the directory so that everything is fully reproducible.

Let's first copy-paste the official training script from our clone of `transformers` to our just created directory:

```bash
cp ~/transformers/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py ./
```

Next, we'll create a bash file to define the hyper-parameters and configurations for training. More detailed information on different settings (single-GPU vs. multi-GPU) can be found [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/speech-recognition#connectionist-temporal-classification).

For demonstration purposes, we will use a dummy XLS-R model `model_name_or_path="hf-test/xls-r-dummy"` on the very low-resource language of "Abkhaz" of [Common Voice 7](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0): `dataset_config_name="ab"` for just a few training steps.

Before starting to train, let's make sure we have installed all the required libraries. You might want to run:

```bash
pip install -r ~/transformers/examples/pytorch/speech-recognition/requirements.txt
```

Alright, finally we can define the training script. We'll simply use some dummy hyper-parameters and configurations for demonstration purposes.

Note that we add the flag `--use_auth_token` so that datasets requiring access, such as [Common Voice 7](https://huggingface.co/datasets/mozilla-foundation/common_voice_7_0), can be downloaded. In addition, we add the `--push_to_hub` flag to make use of the [Trainer's `push_to_hub` functionality](https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.Trainer.push_to_hub) so that your model will be automatically uploaded to the Hub.

Let's copy the following code snippet in a file called `run.sh`

```bash
echo '''python run_speech_recognition_ctc.py \
	--dataset_name="mozilla-foundation/common_voice_7_0" \
	--model_name_or_path="hf-test/xls-r-dummy" \
	--dataset_config_name="ab" \
	--output_dir="./" \
	--overwrite_output_dir \
	--max_steps="10" \
	--per_device_train_batch_size="2" \
	--learning_rate="3e-4" \
	--save_total_limit="1" \
	--eval_strategy="steps" \
	--text_column_name="sentence" \
	--length_column_name="input_length" \
	--save_steps="5" \
	--layerdrop="0.0" \
	--freeze_feature_encoder \
	--gradient_checkpointing \
	--fp16 \
	--group_by_length \
	--push_to_hub \
	--use_auth_token \
	--do_train --do_eval''' > run.sh
```

4. **Start training**

Now all that is left to do is to start training the model by executing the run file.

```bash
bash run.sh
```

The training should not take more than a couple of minutes. During training, intermediate saved checkpoints are automatically uploaded to your model repository, as can be seen [on this commit](https://huggingface.co/hf-test/xls-r-ab-test/commit/0eb19a0fca4d7d163997b59663d98cd856022aa6).

At the end of the training, the [Trainer](https://huggingface.co/docs/transformers/main/en/main_classes/trainer) automatically creates a nice model card and all relevant files are uploaded.

5. **Tips for real model training**

The above steps illustrate how a model can technically be fine-tuned.
However, as you can see on the model card [hf-test/xls-r-ab-test](https://huggingface.co/hf-test/xls-r-ab-test), our demonstration performs very poorly, which is not surprising given that we trained for just 10 steps on a randomly initialized model.

For real model training, it is recommended to use one of the actual pre-trained XLS-R models:

- [300M parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-300m)
- [1B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-1b)
- [2B parameters version](https://huggingface.co/facebook/wav2vec2-xls-r-2b)

Also, the hyperparameters should be carefully chosen depending on the dataset.

As an example, we will fine-tune the 300M parameters model on Swedish on a single TITAN RTX 24GB GPU.

The model will be called `"xls-r-300m-sv"`. Following the above steps, we first create the model:

```bash
huggingface-cli repo create xls-r-300m-sv
```

clone it locally (assuming the `<username>` is `hf-test`):

```bash
git clone https://huggingface.co/hf-test/xls-r-300m-sv
```

and define the following hyperparameters for training:

```bash
echo '''python run_speech_recognition_ctc.py \
	--dataset_name="mozilla-foundation/common_voice_7_0" \
	--model_name_or_path="facebook/wav2vec2-xls-r-300m" \
	--dataset_config_name="sv-SE" \
	--output_dir="./" \
	--overwrite_output_dir \
	--num_train_epochs="50" \
	--per_device_train_batch_size="8" \
	--per_device_eval_batch_size="8" \
	--gradient_accumulation_steps="4" \
	--learning_rate="7.5e-5" \
	--warmup_steps="2000" \
	--length_column_name="input_length" \
	--eval_strategy="steps" \
	--text_column_name="sentence" \
	--chars_to_ignore , ? . ! \- \; \: \" “ % ‘ ” � — ’ … – \
	--save_steps="500" \
	--eval_steps="500" \
	--logging_steps="100" \
	--layerdrop="0.0" \
	--activation_dropout="0.1" \
	--save_total_limit="3" \
	--freeze_feature_encoder \
	--feat_proj_dropout="0.0" \
	--mask_time_prob="0.75" \
	--mask_time_length="10" \
	--mask_feature_prob="0.25" \
	--mask_feature_length="64" \
	--gradient_checkpointing \
	--use_auth_token \
	--fp16 \
	--group_by_length \
	--do_train --do_eval \
	--push_to_hub''' > run.sh
```

The training takes *ca.* 7 hours and yields a reasonable test word error rate of 27%, as can be seen on the automatically generated [model card](https://huggingface.co/hf-test/xls-r-300m-sv).

The above-chosen hyperparameters probably work quite well on a range of different datasets and languages but are by no means optimal. It is up to you to find a good set of hyperparameters.

## How to finetune with OVH cloud

[![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/XkMnYocAEO0) For a more detailed guide on setting up OVHcloud, please watch this video: https://youtu.be/XkMnYocAEO0

### Creating an OVHCloud account

*TIP*: If you haven't created a project on OVHcloud yet, make sure you've received your GPU voucher code *beforehand*, so that you can skip entering the credit card information.

1. If you're a US citizen, create an account via [OVHcloud.CA](https://ovhcloud.ca/). If you're from anywhere else in the world, create an account via [OVHcloud.COM](https://ovhcloud.com/).
2. Once logged in, click `Public Cloud` from the top menu and then click `Create your first OVH Public Cloud project`. Then enter a project name (e.g. "huggingface"), enter your voucher code, and click `Continue` -> `Create my project`.

*Note: if you see a request for credit card details during the last step, and you can't skip it, then your voucher code is invalid.
Please report it to the [#ovh-support](https://discord.gg/p4qqDV3M) channel on Discord.*

### Setting up an AI notebook

1. Go to the `Public Cloud` page and select `Project Management` -> `Users & Roles` from the menu on the left.
2. Click `+ Add user`. Write a user description (e.g. `AI Trainer`), and select an `AI Training Operator` user role. Click `Confirm`.
3. Write down the *username* and *password* (at the top of the screen) somewhere. They will be needed during step 7.
4. Select `AI & Machine Learning` -> `AI Training` from the menu on the left. Click `+ Launch a new job` on the AI Training page.
5. On the `Launch a new job` page:
   * In `1. Choose a region` select a region closest to you.
   * In `2. Enter the Docker image` select `Custom image` -> `baaastijn/ovh_huggingface`.
   * You can skip steps `3.` and `4.` if you will be using the Hugging Face Hub to store the models after training.
   * In `5. Configure your job` select **1** `GPU`.
   * Validate the info and create the job.
6. On the `AI Training Jobs` screen wait until the job's status changes from `Pending` to `Running`.
7. Click `HTTP Access` from the job's details page and log in with the AI training user you've created earlier. Once logged in, you can close the page and click `HTTP Access` again to launch a JupyterLab notebook.
8. Awesome, now you have a free GPU-enabled Jupyter instance!

**Note**: If you're an experienced Docker user, feel free to create a custom Docker image with all of the needed packages like the one in step 5. The Dockerfile for it is available here: [baaastijn/Dockerimages](https://github.com/baaastijn/Dockerimages/tree/main/Hugginface_challenge_speech). Once you've built your image, push it to https://hub.docker.com/ and select it during the OVHcloud job creation.

For more quick tutorials about OVHcloud AI products, check out the showcase https://vimeo.com/showcase/8903300

## How to combine n-gram with acoustic model

Having trained a speech recognition model with CTC as shown in the section above, one can further improve the model's performance by adding an **n-gram language model** to the decoding process of the model. By doing so, we are replacing the naive greedy decoding with **n-gram-boosted** beam search decoding.

N-gram language models can be built on CPU in just a few minutes. *N-gram-boosted* beam search decoding noticeably slows down the inference time, but also yields significant word error rate improvements, usually between 10 and 40%.

You can find an in-depth blog post on how to build an *n-gram* [here](https://huggingface.co/blog/wav2vec2-with-ngram). The blog post can be opened in a Google Colab, and by adapting three lines of the example to your use case, you can directly create an *n-gram* there. More specifically, the blog post gives detailed instructions on:

- why one should add an *n-gram* to a speech recognition system,
- how to build an *n-gram*, and
- how to add the built *n-gram* to the speech recognition system for seamless decoding.

Our previously trained model - [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) - enjoys a 30% word error rate reduction after having added an n-gram.

As shown in the example of the blog post, we strongly advise participants to upload all files required for combining the *n-gram* with a trained speech recognition model directly into the same model repository.
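To give a feel for what n-gram-boosted decoding looks like in practice, here is a minimal sketch using `Wav2Vec2ProcessorWithLM`. It assumes the model repository already contains the `pyctcdecode`/kenlm language model files as described in the blog post; the silent dummy audio is a placeholder for your own 16 kHz recording:

```python
import numpy as np
import torch
from transformers import AutoModelForCTC, Wav2Vec2ProcessorWithLM

# only loads successfully if the repo ships the n-gram / decoder files
processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-test/xls-r-300m-sv")
model = AutoModelForCTC.from_pretrained("hf-test/xls-r-300m-sv")

# one second of silence as placeholder input; replace with real 16 kHz audio
audio = np.zeros(16_000, dtype=np.float32)

inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# instead of a greedy argmax over the vocabulary, batch_decode runs
# n-gram-boosted beam search with the language model shipped in the repo
transcription = processor.batch_decode(logits.numpy()).text[0]
print(transcription)
```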
## Evaluation

Finally, we have arrived at the most fun part of the challenge - sitting back and watching the model transcribe audio. If possible, every participant should evaluate the speech recognition system on the test set of Common Voice 7 and ideally also on the real-world audio data (if available). For languages that have neither a Common Voice evaluation dataset nor a real-world evaluation dataset, please contact the organizers on Discord so that we can work together to find some evaluation data.

As a first step, one should copy the official `eval.py` script to their model repository. Let's use our previously trained [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) again as an example.

Assuming that we have a clone of the model's repo under `~/xls-r-300m-sv`, we can copy the `eval.py` script to the repo:

```bash
cp ~/transformers/examples/research_projects/robust-speech-event/eval.py ~/xls-r-300m-sv
```

Next, we should adapt `eval.py` so that it fits our evaluation data. Here it is important to keep the `eval.py` file in the following format:

- 1. The following input arguments should not be changed and should keep their original functionality/meaning (being to load the model and dataset): `"--model_id"`, `"--dataset"`, `"--config"`, `"--split"`. We recommend not changing any of the code written under `if __name__ == "__main__":`.
- 2. The function `def log_results(result: Dataset, args: Dict[str, str])` should also not be changed. The function expects the above names attached to the `args` object as well as a `datasets.Dataset` object called `result`, which includes all predictions and target transcriptions under the names `"predictions"` and `"targets"` respectively.
- 3. All other code can be changed and adapted. Participants are especially invited to change the `def normalize_text(text: str) -> str:` function, as this might be a very language- and model-training-specific function.
- 4. **Important**: It is not allowed to "cheat" in any way when it comes to pre- and post-processing. In short, "cheating" refers to any of the following:
    - a. Somehow giving the model access to the target transcriptions to improve performance. The model is not allowed to use the target transcriptions to generate its predictions.
    - b. Pre-processing the target transcriptions in a way that makes the target transcriptions lose their original meaning. This corresponds to what has already been said in [Data and Preprocessing](#data-and-preprocessing) and is somewhat of a grey zone. It means that one should not remove characters that would make a word lose its meaning. E.g., it is not allowed to replace all `e` in English with `i` and simply make the model learn that `e` and `i` are the same letter for a better word error rate. This would destroy the meaning of words such as `fell -> fill`. However, it is totally fine to normalize (*e.g.* lowercase) all letters and remove punctuation. There can be a lot of language-specific exceptions, and in case you are not sure whether your target transcription pre-processing is allowed, please ask on the Discord channel.

Uff, that was a lot of text describing how to make sure your `eval.py` script is in the correct format. If you have any questions, please ask openly in Discord.

Great, now that we have adapted the `eval.py` script, we can lean back and run the evaluation. First, one should evaluate the model on Common Voice 7's test data.
This might already have been done for your acoustic model during training, but in case you added an *n-gram* language model after having fine-tuned the acoustic model, you should now see a nice improvement.

The command to evaluate our test model [xls-r-300m-sv](https://huggingface.co/hf-test/xls-r-300m-sv) on Common Voice 7's test data is the following:

```bash
cd xls-r-300m-sv
./eval.py --model_id ./ --dataset mozilla-foundation/common_voice_7_0 --config sv-SE --split test --log_outputs
```

Adding the `--log_outputs` flag, as done above, logs each of the model's predictions alongside the target transcriptions. Running this command should automatically create the file `mozilla-foundation_common_voice_7_0_sv-SE_test_eval_results.txt`, which contains both the word and character error rate.

In a few days, we will give everybody access to some real-world audio data for as many languages as possible. If your language has real-world audio data, it will most likely have audio input of multiple minutes. 🤗 Transformers' [ASR pipeline](https://huggingface.co/docs/transformers/main/en/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) supports audio chunking out-of-the-box. You only need to specify how long each audio chunk should be (`chunk_length_s`) and how much audio stride (`stride_length_s`) each chunk should use. For more information on how the chunking works, please have a look at [this nice blog post](TODO: ).

In the case of `xls-r-300m-sv`, the following command can be run:

```bash
cd xls-r-300m-sv
./eval.py --model_id hf-test/xls-r-300m-sv --dataset <to-be-announced> --config sv --split validation --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs
```

Great, now you should have successfully evaluated your model.

Finally, there is one **important** thing you should do so that your model is taken into account for the final evaluation. You should add two tags to your model: one being `robust-speech-event`, one being the ISO code of your chosen language, *e.g.* `"sv"` for the exemplary model we used above. You can find a list of all available languages and their ISO code [here](https://huggingface.co/languages).

To add the tags, simply edit the README.md of your model repository and add

```
- "sv"
- "robust-speech-event"
```

under `tags:` as done [here](https://huggingface.co/hf-test/xls-r-300m-sv/commit/a495fd70c96bb7d019729be9273a265c2557345e).

To verify that you've added the tags correctly, make sure that your model appears when clicking on [this link](https://huggingface.co/models?other=robust-speech-event).

Great, that's it! This should give you all the necessary information to evaluate your model. For the final evaluation, we will verify each evaluation result to determine the final score and thereby the winning models for each language. The final score is calculated as follows:

```bash
FINAL_SCORE = 1/3 * WER_Common_Voice_7_test + 1/3 * WER_REAL_AUDIO_DEV + 1/3 * WER_REAL_AUDIO_TEST
```

The dataset `WER_REAL_AUDIO_TEST` is hidden and will only be published at the end of the robust speech challenge.

If there is no real audio data for your language, the final score will be computed solely based on the Common Voice 7 test dataset. If there is also no Common Voice 7 test dataset for your language, we will see together how to score your model - if this is the case, please don't be discouraged. We are especially excited about speech recognition systems of such low-resource languages and will make sure that we'll decide on a good approach to evaluating your model.
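Outside of `eval.py`, the same chunked inference can be reproduced in a few lines with the ASR pipeline. This is a minimal sketch; the audio filename is a placeholder for your own long recording:

```python
from transformers import pipeline

# chunk_length_s / stride_length_s enable out-of-the-box chunking of long audio,
# matching the flags passed to eval.py above
asr = pipeline(
    "automatic-speech-recognition",
    model="hf-test/xls-r-300m-sv",
    chunk_length_s=5.0,
    stride_length_s=1.0,
)

# "long_audio.wav" is a placeholder; any multi-minute 16 kHz audio file works
prediction = asr("long_audio.wav")
print(prediction["text"])
```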
## Prizes

TODO(Patrick, Omar, ...)

## Communication and Problems

If you encounter any problems or have any questions, you should use one of the following platforms depending on your type of problem. Hugging Face is an "open-source-first" organization, meaning that we'll try to solve all problems in the most public and most transparent way possible so that everybody in the community profits.

The following list summarizes what platform to use for which problem:

- For a problem/question/bug with the 🤗 Datasets library that you think is a general problem that also impacts other people, please open an [issue on Datasets](https://github.com/huggingface/datasets/issues/new?assignees=&labels=bug&template=bug-report.md&title=) and ping @anton-l and @patrickvonplaten.
- For a problem/question/bug with the 🤗 Transformers library that you think is a general problem that also impacts other people, please open an [issue on Transformers](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title=) and ping @anton-l and @patrickvonplaten.
- For a problem/question with a modified, customized training script that is less likely to impact other people, please post your problem/question [on the forum](https://discuss.huggingface.co/) and ping @anton-l and @patrickvonplaten.
- For questions regarding access to the OVHcloud GPU, please ask in the Discord channel **#ovh-support**.
- For other questions regarding the event, rules of the event, or if you are not sure where to post your question, please ask in the Discord channel **#sprint-discussions**.

## Talks

We are very excited to be hosting 2 days of talks from Kensho-Technologies, Mozilla's Common Voice, Meta AI Research, and Hugging Face.

### Thursday, January 20th

| Speaker | Topic | Time | Video |
|-------------|---------------------------------|------------------------|------------------------|
| Patrick von Platen, Hugging Face | Introduction to Robust Speech Challenge | 4h30pm - 5h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=X9e5Tto-Iuk) |
| Raymond Grossman and Jeremy Lopez, Kensho-Technologies | Pyctcdecode & Speech2text decoding | 5h30pm - 6h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=mp7fHMTnK9A) |

### Friday, January 21st

| Speaker | Topic | Time | Video |
|-------------|---------------------------------|------------------------|------------------------|
| Gabriel Habayeb, Mozilla Common Voice | Unlocking global speech with Mozilla Common Voice | 4h30pm - 5h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=Vvn984QmAVg) |
| Changhan Wang, Meta AI Research | XLS-R: Large-Scale Cross-lingual Speech Representation Learning on 128 Languages | 5h30pm - 6h00pm UTC | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=ic_J7ZCROBM) |

### Talks & Speakers

#### Patrick von Platen, Research Engineer, Hugging Face

- Talk: Introduction to Robust Speech Challenge
- Abstract: In this talk, Patrick outlines the Robust Speech Challenge and gives tips and tricks on how to train and evaluate speech recognition systems with 🤗 Transformers and 🤗 Datasets, and PyTorch.
- Speaker info: Patrick von Platen is a research engineer at Hugging Face and one of the core maintainers of the popular Transformers library.
He specializes in speech recognition, encoder-decoder models, and long-range sequence modeling. Before joining Hugging Face, Patrick researched speech recognition at Uber AI, Cambridge University, and RWTH Aachen University.

#### Raymond Grossman and Jeremy Lopez, Machine Learning Engineers, Kensho Technologies

- Talk: PyCTCDecode & Speech2text decoding
- Abstract: PyCTCDecode is a fast and feature-rich CTC beam search decoder for speech recognition written in Python, providing n-gram (kenlm) language model support similar to PaddlePaddle's decoder, but incorporating many new features such as byte pair encoding and real-time decoding to support models like Nvidia's Conformer-CTC or Facebook's Wav2Vec2.
- Speaker info:
    - Raymond works as a machine learning engineer at Kensho Technologies, specializing in speech and natural language domains. Before coming to Kensho, he studied mathematics at Princeton and was an avid Kaggler under the moniker @ToTrainThemIsMyCause.
    - Jeremy is a machine learning engineer at Kensho Technologies and has worked on a variety of different topics, including search and speech recognition. Before working at Kensho, he earned a PhD in experimental particle physics at MIT and continued doing physics research as a postdoc at the University of Colorado Boulder.

#### Gabriel Habayeb, Data Engineer, Common Voice @ Mozilla

- Talk: Unlocking global speech with Mozilla Common Voice
- Abstract: Hear from Common Voice Data Engineer Gabriel Habayeb (Mozilla Foundation) as he talks about how Common Voice makes it easy to crowdsource voice data in global languages, as well as getting key insights into the dataset itself, how we maintain quality, use metadata - and our plans for the future!
- Speaker info: Gabriel is a software developer with the Common Voice team at the Mozilla Foundation with a focus on data engineering. Before joining the Foundation, he spent the last six years working across different industries, including education, enterprise and not-for-profit organizations.

#### Changhan Wang, Main author of XLS-R and Research Engineer, Meta AI Research

- Talk: XLS-R: Large-Scale Cross-lingual Speech Representation Learning on 128 Languages
- Abstract: In this talk, Changhan will present XLS-R, a large-scale model for cross-lingual speech representation learning based on wav2vec 2.0. XLS-R has up to 2B parameters and was trained on nearly half a million hours of publicly available speech audio in 128 languages, an order of magnitude more public data than the largest known prior work. On the CoVoST-2 speech translation benchmark, XLS-R improves the previous state of the art by an average of 7.4 BLEU over 21 translation directions into English. For speech recognition, XLS-R improves over the best known prior work on BABEL, MLS, CommonVoice as well as VoxPopuli, lowering error rates by 14-34% relative on average. XLS-R also sets a new state of the art on VoxLingua107 language identification. The XLS-R team hopes to work together with the open-source community to improve speech processing tasks for many more languages of the world.

## General Tips and Tricks

- Memory efficient training: In case you are getting out-of-memory errors on your GPU, we recommend using [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) to replace the memory-intensive native Adam optimizer with bitsandbytes' 8-bit Adam optimizer. You can simply run the script `./run_speech_recognition_ctc_bnb.py` provided in this folder, which makes use of `bitsandbytes` instead of the official optimizer (see the sketch after this list).
- Dataset streaming TODO(Patrick)
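As referenced in the memory-efficient training tip above, the following is a minimal sketch of how a training script can swap in bitsandbytes' 8-bit Adam optimizer. The model checkpoint and learning rate are placeholder assumptions, matching the Swedish example earlier in this document:

```python
import bitsandbytes as bnb
from transformers import AutoModelForCTC

# placeholder model; in the actual script this is the XLS-R checkpoint being fine-tuned
model = AutoModelForCTC.from_pretrained("facebook/wav2vec2-xls-r-300m")

# 8-bit Adam from bitsandbytes: a drop-in replacement for the native Adam optimizer
# that stores optimizer states in 8 bits, greatly reducing optimizer memory
optimizer = bnb.optim.Adam8bit(
    model.parameters(),
    lr=7.5e-5,  # assumed learning rate, taken from the Swedish example above
    betas=(0.9, 0.999),
)

# the optimizer can then be handed to the Trainer, e.g. via
# Trainer(..., optimizers=(optimizer, None))
```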
#!/usr/bin/env bash
export PYTHONPATH="../":"${PYTHONPATH}"

export WANDB_PROJECT=dmar
# export MAX_LEN=128
# Note: MAX_LEN and BS must be set in the environment before running this script.
python distillation.py \
  --learning_rate=3e-4 \
  --do_train \
  --fp16 \
  --val_check_interval 0.25 \
  --teacher Helsinki-NLP/opus-mt-en-ro \
  --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \
  --student_decoder_layers 3 --student_encoder_layers 6 \
  --freeze_encoder --freeze_embeds \
  --model_name_or_path IGNORED \
  --alpha_hid=3. \
  --train_batch_size=$BS --eval_batch_size=$BS \
  --tokenizer_name Helsinki-NLP/opus-mt-en-ro \
  --warmup_steps 500 --logger_name wandb \
  --fp16_opt_level O1 --task translation --normalize_hidden --num_sanity_val_steps=0 \
  "$@"
#!/usr/bin/env bash
export PYTHONPATH="../":"${PYTHONPATH}"

# Distills facebook/bart-large-xsum into a 12-encoder/6-decoder student on the XSum dataset.
python distillation.py \
  --teacher facebook/bart-large-xsum --data_dir xsum \
  --tokenizer_name facebook/bart-large-xsum \
  --student_decoder_layers 6 --student_encoder_layers 12 \
  --freeze_encoder --freeze_embeds \
  --learning_rate=3e-4 \
  --do_train \
  --do_predict \
  --fp16 --fp16_opt_level=O1 \
  --val_check_interval 0.1 --n_val 1000 --eval_beams 2 --length_penalty=0.5 \
  --max_target_length=60 --val_max_target_length=60 --test_max_target_length=100 \
  --model_name_or_path IGNORED \
  --alpha_hid=3. \
  --train_batch_size=16 --eval_batch_size=16 --gradient_accumulation_steps=2 \
  --sortish_sampler \
  --num_train_epochs=6 \
  --warmup_steps 500 \
  --output_dir distilbart_xsum_12_6 \
  "$@"
<jupyter_start><jupyter_code>
# %pip install -r requirements.txt
<jupyter_output><empty_output>
<jupyter_text>
**Note**: This demo is adapted from the LXMERT Demo present here: https://github.com/huggingface/transformers/tree/main/examples/research_projects/lxmert
<jupyter_code>
from IPython.display import Image, display
import PIL.Image
import io
import torch
import numpy as np
from processing_image import Preprocess
from visualizing_image import SingleImageViz
from modeling_frcnn import GeneralizedRCNN
from utils import Config
import utils
from transformers import VisualBertForQuestionAnswering, BertTokenizerFast

# URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg"
URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg"
OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt"
ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt"
VQA_URL = "https://dl.fbaipublicfiles.com/pythia/data/answers_vqa.txt"


# for visualizing output
def showarray(a, fmt="jpeg"):
    a = np.uint8(np.clip(a, 0, 255))
    f = io.BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


# load object, attribute, and answer labels
objids = utils.get_data(OBJ_URL)
attrids = utils.get_data(ATTR_URL)
vqa_answers = utils.get_data(VQA_URL)

# load models and model components
frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
image_preprocess = Preprocess(frcnn_cfg)

bert_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
visualbert_vqa = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")

# image viz
frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids)

# run frcnn
images, sizes, scales_yx = image_preprocess(URL)
output_dict = frcnn(
    images,
    sizes,
    scales_yx=scales_yx,
    padding="max_detections",
    max_detections=frcnn_cfg.max_detections,
    return_tensors="pt",
)

# add boxes and labels to the image
frcnn_visualizer.draw_boxes(
    output_dict.get("boxes"),
    output_dict.pop("obj_ids"),
    output_dict.pop("obj_probs"),
    output_dict.pop("attr_ids"),
    output_dict.pop("attr_probs"),
)
showarray(frcnn_visualizer._get_buffer())

# test_questions_for_url1 = [
#     "Where is this scene?",
#     "what is the man riding?",
#     "What is the man wearing?",
#     "What is the color of the horse?"
# ]
test_questions_for_url2 = [
    "Where is the cat?",
    "What is near the disk?",
    "What is the color of the table?",
    "What is the color of the cat?",
    "What is the shape of the monitor?",
]

# Very important that the boxes are normalized
# normalized_boxes = output_dict.get("normalized_boxes")
features = output_dict.get("roi_features")

for test_question in test_questions_for_url2:
    test_question = [test_question]

    inputs = bert_tokenizer(
        test_question,
        padding="max_length",
        max_length=20,
        truncation=True,
        return_token_type_ids=True,
        return_attention_mask=True,
        add_special_tokens=True,
        return_tensors="pt",
    )

    output_vqa = visualbert_vqa(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        visual_embeds=features,
        visual_attention_mask=torch.ones(features.shape[:-1]),
        token_type_ids=inputs.token_type_ids,
        output_attentions=False,
    )
    # get prediction
    pred_vqa = output_vqa["logits"].argmax(-1)
    print("Question:", test_question)
    print("prediction from VisualBert VQA:", vqa_answers[pred_vqa])
<jupyter_output>
Question: ['Where is the cat?']
prediction from VisualBert VQA: outside
Question: ['What is near the disk?']
prediction from VisualBert VQA: nothing
Question: ['What is the color of the table?']
prediction from VisualBert VQA: brown
Question: ['What is the color of the cat?']
prediction from VisualBert VQA: gray
Question: ['What is the shape of the monitor?']
prediction from VisualBert VQA: square