Dataset columns:
text: string, lengths 7 to 324k
id: string, lengths 14 to 166
metadata: dict
__index_level_0__: int64, values 0 to 463
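Each row below carries these four fields. As a minimal sketch of how rows with this schema could be inspected (the local JSON-lines file name is a placeholder assumption, not the actual dataset location):

# Minimal sketch, assuming the rows were exported to a local JSON-lines file;
# "records.jsonl" is a hypothetical path, not taken from this page.
from datasets import load_dataset

ds = load_dataset("json", data_files="records.jsonl", split="train")
for row in ds.select(range(2)):
    print(row["id"])                       # e.g. "transformers/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py/0"
    print(row["metadata"]["token_count"])  # token count recorded for this row's text field
    print(row["__index_level_0__"])        # integer row index carried over from the source frame
    print(row["text"][:80])                # first characters of the stored source file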
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch MobileNetV1 model. """ import unittest from transformers import MobileNetV1Config from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetV1ForImageClassification, MobileNetV1Model from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetV1ImageProcessor class MobileNetV1ConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "tf_padding")) self.parent.assertTrue(hasattr(config, "depth_multiplier")) class MobileNetV1ModelTester: def __init__( self, parent, batch_size=13, num_channels=3, image_size=32, depth_multiplier=0.25, min_depth=8, tf_padding=True, last_hidden_size=1024, output_stride=32, hidden_act="relu6", classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.depth_multiplier = depth_multiplier self.min_depth = min_depth self.tf_padding = tf_padding self.last_hidden_size = int(last_hidden_size * depth_multiplier) self.output_stride = output_stride self.hidden_act = hidden_act self.classifier_dropout_prob = classifier_dropout_prob self.use_labels = use_labels self.is_training = is_training self.num_labels = num_labels self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None pixel_labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels, pixel_labels def get_config(self): return MobileNetV1Config( num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels, pixel_labels): model = MobileNetV1Model(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.last_hidden_state.shape, 
( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ), ) def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): config.num_labels = self.num_labels model = MobileNetV1ForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels, pixel_labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = MobileNetV1ModelTester(self) self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="MobileNetV1 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="MobileNetV1 does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="MobileNetV1 does not output attentions") def test_attention_outputs(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_stages = 26 self.assertEqual(len(hidden_states), expected_num_stages) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = MobileNetV1Model.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class MobileNetV1ModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( 
MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1001)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
transformers/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py/0
{ "file_path": "transformers/tests/models/mobilenet_v1/test_modeling_mobilenet_v1.py", "repo_id": "transformers", "token_count": 3818 }
364
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "openai-community/openai-gpt" """Tests OpenAIGPTTokenizer that uses BERT BasicTokenizer.""" tokenizer_class = OpenAIGPTTokenizer rust_tokenizer_class = OpenAIGPTTokenizerFast test_rust_tokenizer = True test_seq2seq = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" def test_full_tokenizer(self): tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def test_padding(self, max_length=15): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) # Simple input s = "This is a simple input" s2 = ["This is a simple input 1", "This is a simple input 2"] p = ("This is a simple input", "This is a pair") p2 = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length") # Simple input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length", ) # Pair input self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length") # Pair input self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, 
padding="max_length") # Pair input self.assertRaises( ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length", ) # tokenizer has no padding token def test_padding_different_model_input_name(self): pass @require_ftfy @require_spacy @require_tokenizers class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest): """Tests OpenAIGPTTokenizer that uses SpaCy and ftfy.""" pass
transformers/tests/models/openai/test_tokenization_openai.py/0
{ "file_path": "transformers/tests/models/openai/test_tokenization_openai.py", "repo_id": "transformers", "token_count": 2429 }
365
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile import unittest from transformers import ProphetNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetTokenizer, ) from transformers.modeling_outputs import BaseModelOutput class ProphetNetModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=9, # For common tests is_training=True, use_attention_mask=True, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.ngram = ngram self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 7 self.num_hidden_states_types = 3 # encoder, decoder_main, decoder_ngram self.decoder_attention_idx = 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: 
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_config(self): return ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ngram=self.ngram, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) return ( config, decoder_input_ids, decoder_attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = 
result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_decoder_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) # cross-attention + uni-directional self-attention def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 5) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_causal_lm_decoder( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForCausalLM(config=config).to(torch_device).eval() outputs = model( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_generate_with_past_key_value_states( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_decoder_generate_with_past_key_value_states( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetForCausalLM(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=10, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=10, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = ProphetNetModel(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [ProphetNetModel, ProphetNetForConditionalGeneration]: torch.manual_seed(0) model = model_class(config=config).to(torch_device).eval() # load 
state dict copies weights but does not tie them if model_class == ProphetNetForConditionalGeneration: model.prophetnet.encoder.load_state_dict(model.prophetnet.decoder.state_dict(), strict=False) else: model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_fast_integration( self, config, *args, ): input_ids = torch.tensor([[7, 4, 78, 0, 24, 52, 43]], device=torch_device, dtype=torch.long) decoder_input_ids = torch.tensor([[12, 62, 25, 11, 47, 15, 14]], device=torch_device, dtype=torch.long) attention_mask = torch.tensor([[1, 1, 1, 0, 1, 0, 0]], device=torch_device, dtype=torch.long) decoder_attention_mask = torch.tensor([[1, 1, 1, 0, 0, 1, 0]], device=torch_device, dtype=torch.long) lm_labels = torch.tensor([[62, 25, 11, 47, 15, 14, 24]], device=torch_device, dtype=torch.long) torch.manual_seed(0) config.ngram = 4 model = ProphetNetForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertTrue(torch.allclose(result.loss, torch.tensor(4.5892, device=torch_device), atol=1e-3)) expected_logit_slice = torch.tensor( [-0.0184, 0.0758, -0.0543, -0.0093, 0.0050, -0.0660, -0.1453], device=torch_device ) self.parent.assertTrue(torch.allclose(result.logits[0, :, 1], expected_logit_slice, atol=1e-3)) def check_model_with_attn_mask(self, config, input_ids, decoder_input_ids, *args): model = ProphetNetModel(config=config) model.to(torch_device) model.eval() outputs_no_mask = model(input_ids=input_ids[:, :5], decoder_input_ids=decoder_input_ids[:, :5]) attention_mask = torch.ones_like(input_ids) decoder_attention_mask = torch.ones_like(decoder_input_ids) attention_mask[:, 5:] 
= 0 outputs_with_mask = model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, ) # check encoder self.parent.assertTrue( torch.allclose( outputs_no_mask.encoder_last_hidden_state[0, :, 0], outputs_with_mask.encoder_last_hidden_state[0, :5, 0], atol=1e-3, ) ) # check decoder # main stream self.parent.assertTrue( torch.allclose( outputs_no_mask.last_hidden_state[0, :, 0], outputs_with_mask.last_hidden_state[0, :5, 0], atol=1e-3 ) ) # predict stream self.parent.assertTrue( torch.allclose( outputs_no_mask.last_hidden_state_ngram[0, :5, 0], outputs_with_mask.last_hidden_state_ngram[0, :5, 0], atol=1e-2, ) ) def check_causal_lm_from_pretrained( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, *args ): model = ProphetNetForConditionalGeneration(config).to(torch_device).eval() with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) decoder = ProphetNetForCausalLM.from_pretrained(tmp_dirname).to(torch_device) encoder_hidden_states = model.prophetnet.encoder(input_ids).last_hidden_state model_outputs = model( encoder_outputs=BaseModelOutput(last_hidden_state=encoder_hidden_states), decoder_input_ids=decoder_input_ids, ) dec_outputs = decoder(encoder_hidden_states=encoder_hidden_states, input_ids=decoder_input_ids) self.parent.assertTrue( torch.allclose( model_outputs.logits[0, :5], dec_outputs.logits[0, :5], atol=1e-3, ) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "use_cache": False, } return config, inputs_dict class ProphetNetStandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, is_decoder=True, use_attention_mask=True, add_cross_attention=False, use_cache=False, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, ngram=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.ngram = ngram self.num_buckets = num_buckets 
self.relative_max_distance = relative_max_distance self.use_cache = use_cache self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.add_cross_attention = add_cross_attention self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.num_hidden_states_types = 2 # decoder_main, decoder_ngram self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) config = ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ngram=self.ngram, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, add_cross_attention=self.add_cross_attention, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, lm_labels, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, attention_mask, lm_labels, ) = self.prepare_config_and_inputs() encoder_hidden_states = floats_tensor([self.batch_size, self.encoder_seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = ProphetNetDecoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, 
lm_labels, ): model = ProphetNetDecoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class ProphetNetStandaloneEncoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, hidden_size=16, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, is_decoder=False, use_attention_mask=True, add_cross_attention=False, use_cache=False, use_labels=True, decoder_start_token_id=0, encoder_ffn_dim=32, num_encoder_layers=2, num_encoder_attention_heads=4, decoder_ffn_dim=32, num_decoder_layers=2, num_decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_buckets=32, relative_max_distance=128, disable_ngram_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_decoder_layers self.num_encoder_layers = num_encoder_layers self.num_decoder_layers = num_decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_ffn_dim = encoder_ffn_dim self.num_attention_heads = num_decoder_attention_heads self.num_encoder_attention_heads = num_encoder_attention_heads self.num_decoder_attention_heads = num_decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.num_buckets = num_buckets self.relative_max_distance = relative_max_distance self.use_cache = use_cache 
self.disable_ngram_loss = disable_ngram_loss self.max_position_embeddings = max_position_embeddings self.add_cross_attention = add_cross_attention self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 1 self.num_hidden_states_types = 1 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = ProphetNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_encoder_layers=self.num_encoder_layers, num_decoder_layers=self.num_decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_ffn_dim=self.encoder_ffn_dim, num_encoder_attention_heads=self.num_encoder_attention_heads, num_decoder_attention_heads=self.num_decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, num_buckets=self.num_buckets, relative_max_distance=self.relative_max_distance, disable_ngram_loss=self.disable_ngram_loss, max_position_embeddings=self.max_position_embeddings, add_cross_attention=self.add_cross_attention, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class ProphetNetModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetModel, ProphetNetForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (ProphetNetForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": ProphetNetForConditionalGeneration, "feature-extraction": ProphetNetModel, "summarization": ProphetNetForConditionalGeneration, "text-generation": ProphetNetForCausalLM, "text2text-generation": ProphetNetForConditionalGeneration, "translation": ProphetNetForConditionalGeneration, } if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False is_encoder_decoder = True # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `ProphetNetConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def setUp(self): self.model_tester = ProphetNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_lm_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_only_decoder_causal_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_causal_lm_decoder(*config_and_inputs) def test_fast_integration(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_fast_integration(*config_and_inputs) def test_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) def test_shift_labels_via_shift_left(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) @unittest.skip("Flaky test with no simple resolution. TODO Fix me @patrickvonplaten") def test_decoder_model_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_value_states(*config_and_inputs) def test_encoder_decoder_model_generate(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_generate_with_past_key_value_states(*config_and_inputs) def test_attn_mask_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_model_with_attn_mask(*config_and_inputs) def test_config_save(self): config = self.model_tester.prepare_config_and_inputs()[0] config.add_cross_attention = False with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config = ProphetNetConfig.from_pretrained(tmp_dirname) self.assertFalse(config.add_cross_attention) def test_causal_lm_from_pretrained(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_causal_lm_from_pretrained(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Cant do half precision") def test_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) # methods overwrite method in `test_modeling_common.py` def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) chunk_length = getattr(self.model_tester, "chunk_length", None) if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) 
model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) correct_outlen = 7 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, (self.model_tester.ngram + 1) * decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) if chunk_length is not None: self.assertListEqual( list(self_attentions[0].shape[-4:]), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], ) else: self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) output = outputs[0] 
encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) def test_generate_with_head_masking(self): """Generating with head_masking has not been implemented for ProphetNet models yet.""" pass @require_torch class ProphetNetStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetDecoder, ProphetNetForCausalLM) if is_torch_available() else () all_generative_model_classes = (ProphetNetForCausalLM,) if is_torch_available() else () test_pruning = False test_resize_embeddings = False is_encoder_decoder = False def setUp(self): self.model_tester = ProphetNetStandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return @require_torch class ProphetNetStandaloneEncoderModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ProphetNetEncoder,) if is_torch_available() else () test_pruning = False test_resize_embeddings = False is_encoder_decoder = False def setUp(self): self.model_tester = ProphetNetStandaloneEncoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class=ProphetNetConfig) def test_config(self): self.config_tester.run_common_tests() @require_torch class ProphetNetModelIntegrationTest(unittest.TestCase): @slow def test_pretrained_checkpoint_hidden_states(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased") model.to(torch_device) # encoder-decoder outputs encoder_ids = torch.tensor( [ [ 2871, 102, 2048, 3176, 2780, 1997, 2871, 26727, 2169, 2097, 12673, 1996, 8457, 2006, 2049, 8240, 2859, 2799, 1012, 2023, 6512, 2038, 2174, 13977, 2195, 25962, 1012, 102, ] ] ).to(torch_device) decoder_prev_ids = torch.tensor([[102, 2129, 2116, 2372, 2024, 2006, 2169, 1997, 2122, 2048, 2780, 1029]]).to( torch_device ) output = model( input_ids=encoder_ids, attention_mask=None, encoder_outputs=None, decoder_input_ids=decoder_prev_ids, ) output_predited_logits = output[0] expected_shape = torch.Size((1, 12, 30522)) self.assertEqual(output_predited_logits.shape, expected_shape) expected_slice = torch.tensor( [[[-7.7729, -8.0343, -8.26001], [-7.74213, -7.8629, -8.6000], [-7.7328, -7.8269, -8.5264]]] ).to(torch_device) # self.assertTrue(torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4)) assert torch.allclose(output_predited_logits[:, :3, :3], expected_slice, atol=1e-4) # encoder outputs encoder_outputs = model.prophetnet.encoder(encoder_ids)[0] expected_encoder_outputs_slice = torch.tensor( [[[-0.2526, -0.1951, -0.2185], [-0.8923, 0.2992, -0.4623], [-0.4585, 0.0165, -0.6652]]] ).to(torch_device) expected_shape_encoder = torch.Size((1, 28, 1024)) self.assertEqual(encoder_outputs.shape, 
expected_shape_encoder) # self.assertTrue(torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)) assert torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4) # decoder outputs decoder_outputs = model.prophetnet.decoder(decoder_prev_ids, encoder_hidden_states=encoder_outputs) predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 12, -1) predicting_streams_logits = model.lm_head(predicting_streams) next_first_stream_logits = predicting_streams_logits[:, 0] # self.assertTrue(torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)) assert torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4) @slow def test_cnndm_inference(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-cnndm") model.config.max_length = 512 model.to(torch_device) tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-cnndm") ARTICLE_TO_SUMMARIZE = ( "USTC was founded in Beijing by the Chinese Academy of Sciences (CAS) in September 1958. The Director of" " CAS, Mr. Guo Moruo was appointed the first president of USTC. USTC's founding mission was to develop a" " high-level science and technology workforce, as deemed critical for development of China's economy," ' defense, and science and technology education. The establishment was hailed as "A Major Event in the' ' History of Chinese Education and Science." CAS has supported USTC by combining most of its institutes' " with the departments of the university. USTC is listed in the top 16 national key universities, becoming" " the youngest national key university.".lower() ) input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=511, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) summary_ids = model.generate( input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) EXPECTED_SUMMARIZE_512 = ( "us ##tc was founded by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc is listed in the" " top 16 national key universities ." ) generated_titles = [ " ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids ] self.assertListEqual( [EXPECTED_SUMMARIZE_512], generated_titles, ) input_ids = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=99, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) # actually 98 tokens are used. max_length=100 contains bos and eos. summary_ids = model.generate( input_ids, num_beams=4, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) EXPECTED_SUMMARIZE_100 = ( r"us ##tc was founded in beijing by the chinese academy of sciences ( cas ) in 1958 . [X_SEP] us ##tc " "'" " s founding mission was to develop a high - level science and technology workforce . 
[X_SEP]" ' establishment hailed as " a major event in the history of chinese education and science "' ) generated_titles = [ " ".join(tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True)) for g in summary_ids ] self.assertListEqual( [EXPECTED_SUMMARIZE_100], generated_titles, ) @slow def test_question_gen_inference(self): model = ProphetNetForConditionalGeneration.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg") model.to(torch_device) tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased-squad-qg") INPUTS = [ "Bill Gates [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", "1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", "April 4, 1975 [SEP] Microsoft was founded by Bill Gates and Paul Allen on April 4, 1975.", ] input_ids = tokenizer(INPUTS, truncation=True, padding=True, return_tensors="pt").input_ids input_ids = input_ids.to(torch_device) gen_output = model.generate(input_ids, num_beams=5, early_stopping=True) generated_questions = tokenizer.batch_decode(gen_output, skip_special_tokens=True) EXPECTED_QUESTIONS = [ "along with paul allen, who founded microsoft?", "what year was microsoft founded?", "when was microsoft founded?", ] self.assertListEqual( EXPECTED_QUESTIONS, generated_questions, )
transformers/tests/models/prophetnet/test_modeling_prophetnet.py/0
{ "file_path": "transformers/tests/models/prophetnet/test_modeling_prophetnet.py", "repo_id": "transformers", "token_count": 25656 }
366
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class RagTokenizerTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def tearDown(self): shutil.rmtree(self.tmpdirname) @require_tokenizers def test_save_load_pretrained_with_saved_config(self): save_dir = os.path.join(self.tmpdirname, "rag_tokenizer") rag_config = 
RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict()) rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer()) rag_config.save_pretrained(save_dir) rag_tokenizer.save_pretrained(save_dir) new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config) self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab()) self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast) self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab()) @slow def test_pretrained_token_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict) @slow def test_pretrained_sequence_nq_tokenizer(self): tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq") input_strings = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] input_dict = tokenizer(input_strings) self.assertIsNotNone(input_dict)
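# --- Illustrative appendix (not part of the original test file above) ---
# A minimal sketch of driving the composite RagTokenizer that these tests exercise.
# The checkpoint name and the question strings come from the integration tests above;
# printing input_ids assumes the default question-encoder tokenization path.
from transformers import RagTokenizer

rag_tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = rag_tokenizer(["who got the first nobel prize in physics"])  # question-encoder side
print(batch["input_ids"])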
transformers/tests/models/rag/test_tokenization_rag.py/0
{ "file_path": "transformers/tests/models/rag/test_tokenization_rag.py", "repo_id": "transformers", "token_count": 3143 }
367
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest from transformers import SPIECE_UNDERLINE, AddedToken, BatchEncoding, SiglipTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece @require_tokenizers class SiglipTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "google/siglip-base-patch16-224" tokenizer_class = SiglipTokenizer test_rust_tokenizer = False test_sentencepiece = True test_sentencepiece_ignore_case = True # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.setUp with T5->Siglip def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = SiglipTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_convert_token_and_id with T5->Siglip def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "<s>" token_id = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") def test_full_tokenizer(self): tokenizer = SiglipTokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [66, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE, "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [7, 23, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 12, 66, 46, 72, 80, 6, 0]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE, "i", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ], ) @cached_property def siglip_tokenizer(self): return 
SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224") # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.get_tokenizer with T5->Siglip def get_tokenizer(self, **kwargs) -> SiglipTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_rust_and_python_full_tokenizers with T5->Siglip def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_eos_treatment(self): tokenizer = self.siglip_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_prepare_batch(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, tokenizer.eos_token_id] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) def test_empty_target_text(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length(self): tokenizer = self.siglip_tokenizer tgt_text = ["Summary of the text.", "Another summary."] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_eos_in_input(self): tokenizer = self.siglip_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [262, 266, 476, 8532, 270, 4460, 3949, 1682, 1] expected_tgt_tokens = [6254, 267, 260, 1443, 1] batch = tokenizer(src_text, text_target=tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, batch["labels"][0]) @unittest.skip(reason="SiglipTokenizer strips the punctuation") def test_subword_regularization_tokenizer(self): pass @unittest.skip(reason="SiglipTokenizer strips the punctuation") def test_pickle_subword_regularization_tokenizer(self): pass # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_special_tokens_initialization with T5->Siglip def test_special_tokens_initialization(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): added_tokens = [f"<extra_id_{i}>" for i in range(100)] + [AddedToken("<special>", lstrip=True)] tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) tokenizer_cr = self.rust_tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True ) tokenizer_p = self.tokenizer_class.from_pretrained( pretrained_name, additional_special_tokens=added_tokens, **kwargs ) p_output = tokenizer_p.encode("Hey this is a <special> token") r_output = tokenizer_r.encode("Hey this is a <special> token") cr_output = tokenizer_cr.encode("Hey this is a <special> token") special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0] self.assertEqual(p_output, r_output) self.assertEqual(cr_output, r_output) self.assertTrue(special_token_id in p_output) self.assertTrue(special_token_id in r_output) self.assertTrue(special_token_id in cr_output) # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_special_tokens_initialization_with_non_empty_additional_special_tokens with T5->Siglip def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(100)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # BySiglipTokenization no vocab self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_sentencepiece_tokenize_and_convert_tokens_to_string(self): """Test ``_tokenize`` and ``convert_tokens_to_string``.""" if not self.test_sentencepiece: return tokenizer = self.get_tokenizer() text = "This is text to test the tokenizer." if self.test_sentencepiece_ignore_case: text = text.lower() tokens = tokenizer.tokenize(text) self.assertTrue(len(tokens) > 0) # check if converting back to original text works reverse_text = tokenizer.convert_tokens_to_string(tokens) if self.test_sentencepiece_ignore_case: reverse_text = reverse_text.lower() expected_text = "this is text to test the tokenizer" self.assertEqual(reverse_text, expected_text) special_tokens = tokenizer.all_special_tokens special_tokens_string = tokenizer.convert_tokens_to_string(special_tokens) for special_token in special_tokens: self.assertIn(special_token, special_tokens_string) if self.test_rust_tokenizer: rust_tokenizer = self.get_rust_tokenizer() special_tokens_string_rust = rust_tokenizer.convert_tokens_to_string(special_tokens) self.assertEqual(special_tokens_string, special_tokens_string_rust) # overwritten from `test_tokenization_common` since Siglip has no max length # Copied from tests.models.t5.test_tokenization_t5.T5TokenizationTest.test_pretrained_model_lists with T5->Siglip def test_pretrained_model_lists(self): # We should have at least one default checkpoint for each tokenizer # We should specify the max input length as well (used in some part to list the pretrained checkpoints) self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1) @slow def test_tokenizer_integration(self): tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224") # fmt: off texts = [ 'the real mountain view', 'Zürich', 'San Francisco', 'a picture of a laptop with the lockscreen on, a cup of cappucino, salt and pepper grinders. 
The view through the window reveals lake Zürich and the Alps in the background of the city.', ] expected_input_ids = [ [260, 638, 3293, 870, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 761, 5879, 5345, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 264, 452, 20563, 15949, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [262, 266, 1357, 267, 262, 266, 4429, 275, 260, 3940, 6360, 277, 262, 266, 3064, 267, 3549, 388, 16538, 296, 298, 2617, 263, 4869, 14998, 264, 260, 870, 393, 260, 1710, 7958, 4324, 262, 761, 5879, 5345, 263, 260, 1518, 388, 264, 268, 260, 1970, 267, 260, 741, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ] # fmt: on for text, expected in zip(texts, expected_input_ids): input_ids = tokenizer(text, padding="max_length").input_ids self.assertListEqual(input_ids, expected) def test_some_edge_cases(self): tokenizer = SiglipTokenizer.from_pretrained("google/siglip-base-patch16-224", legacy=False) sp_tokens = tokenizer.sp_model.encode("</s>>", out_type=str) self.assertEqual(sp_tokens, ["</", "s", ">", ">"]) tokens = tokenizer.tokenize("</s>>") self.assertNotEqual(sp_tokens, tokens) self.assertEqual(tokens, ["</s>"]) tokens = tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("", out_type=str)) tokens = tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode(" ", out_type=str)) tokens = tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) tokens = tokenizer.tokenize(" ▁") self.assertEqual(tokens, []) self.assertEqual(tokens, tokenizer.sp_model.encode("▁", out_type=str)) @require_sentencepiece @require_tokenizers class CommonSpmIntegrationTests(unittest.TestCase): """ A class that regroups important test to make sure that we properly handle the special tokens. """ @classmethod def setUpClass(cls): tokenizer = SiglipTokenizer(SAMPLE_VOCAB, extra_ids=0, legacy=False) tokenizer.add_special_tokens( {"additional_special_tokens": [AddedToken("<extra_id_0>", rstrip=False, lstrip=False)]} ) cls.tokenizer = tokenizer def test_add_dummy_prefix(self): # make sure `'▁'` is prepended, and outputs match sp_model's # `sentencepiece.NormalizerSpec.add_dummy_prefix` attribute input_ids = self.tokenizer.encode(". Hello", add_special_tokens=False) self.assertEqual(input_ids, [37, 86, 20]) self.assertEqual(input_ids, [37, 86, 20]) tokens = self.tokenizer.tokenize(". Hello") self.assertEqual(tokens, ["▁he", "ll", "o"]) tokens = self.tokenizer.tokenize("") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("", out_type=str)) tokens = self.tokenizer.tokenize(" ") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode(" ", out_type=str)) tokens = self.tokenizer.tokenize("▁") self.assertEqual(tokens, []) self.assertEqual(tokens, self.tokenizer.sp_model.encode("▁", out_type=str)) def test_remove_extra_whitespaces(self): # make sure the extra spaces are eaten # sentencepiece.NormalizerSpec.remove_extra_whitespaces attribute input_ids = self.tokenizer.encode(" . 
Hello", add_special_tokens=False) self.assertEqual(input_ids, [37, 86, 20]) self.assertEqual(input_ids, [37, 86, 20]) tokens = self.tokenizer.tokenize(" . Hello") self.assertEqual(tokens, ["▁he", "ll", "o"]) # `'▁'` is also a whitespace input_ids = self.tokenizer.encode("▁He is not") self.assertEqual(input_ids, [37, 46, 44, 2]) tokens = self.tokenizer.tokenize("▁He is not") self.assertEqual(tokens, ["▁he", "▁is", "▁not"]) # no extra space added input_ids = self.tokenizer.encode("▁He is not ▁He") self.assertEqual(input_ids, [37, 46, 44, 37, 2]) tokens = self.tokenizer.tokenize("▁He is not ▁He") self.assertEqual(tokens, ["▁he", "▁is", "▁not", "▁he"]) # spaces are eaten by spm even if not start
transformers/tests/models/siglip/test_tokenization_siglip.py/0
{ "file_path": "transformers/tests/models/siglip/test_tokenization_siglip.py", "repo_id": "transformers", "token_count": 9776 }
368
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the SpeechT5 processors.""" import json import os import shutil import tempfile import unittest from transformers import is_speech_available, is_torch_available from transformers.models.speecht5 import SpeechT5Tokenizer from transformers.testing_utils import get_tests_dir, require_torch from transformers.utils import FEATURE_EXTRACTOR_NAME if is_speech_available() and is_torch_available(): from transformers import SpeechT5FeatureExtractor, SpeechT5Processor from .test_feature_extraction_speecht5 import floats_list SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model") @require_torch class SpeechT5ProcessorTest(unittest.TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) feature_extractor_map = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16000, "do_normalize": False, "num_mel_bins": 80, "hop_length": 16, "win_length": 64, "win_function": "hann_window", "fmin": 80, "fmax": 7600, "mel_floor": 1e-10, "reduction_factor": 2, "return_attention_mask": True, } self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(feature_extractor_map) + "\n") def get_tokenizer(self, **kwargs): return SpeechT5Tokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_feature_extractor(self, **kwargs): return SpeechT5FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) processor.save_pretrained(self.tmpdirname) processor = SpeechT5Processor.from_pretrained(self.tmpdirname) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_save_load_pretrained_additional_features(self): processor = SpeechT5Processor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) processor = SpeechT5Processor.from_pretrained( self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, SpeechT5Tokenizer) 
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, SpeechT5FeatureExtractor) def test_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio=raw_speech, return_tensors="np") input_processor = processor(audio=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_feature_extractor_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(audio_target=raw_speech, return_tensors="np") input_processor = processor(audio_target=raw_speech, return_tensors="np") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_target(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) input_str = "This is a test string" encoded_processor = processor(text_target=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) def test_model_input_names(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor) self.assertListEqual( processor.model_input_names, feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
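# --- Illustrative appendix (not part of the original test file above) ---
# A minimal sketch of the dispatch these tests verify: SpeechT5Processor forwards
# `audio`/`audio_target` to the feature extractor and `text`/`text_target` to the tokenizer.
# The hub checkpoint below is an assumption for demonstration; the tests themselves build
# both components from local fixtures instead.
from transformers import SpeechT5Processor

processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")  # assumed checkpoint
text_inputs = processor(text="This is a test string")  # tokenizer path -> input_ids
# audio_inputs = processor(audio=waveform, sampling_rate=16000)  # feature-extractor path ("waveform" assumed)
print(list(text_inputs.keys()))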
transformers/tests/models/speecht5/test_processor_speecht5.py/0
{ "file_path": "transformers/tests/models/speecht5/test_processor_speecht5.py", "repo_id": "transformers", "token_count": 2860 }
369
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import unittest import numpy as np import pandas as pd from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TapasConfig, TapasTokenizer, is_tf_available, ) from transformers.models.auto import get_values from transformers.testing_utils import require_tensorflow_probability, require_tf, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, ) from transformers.models.tapas.modeling_tf_tapas import ( IndexMap, ProductIndexMap, flatten, gather, range_index_map, reduce_max, reduce_mean, reduce_sum, ) class TFTapasModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, max_position_embeddings=512, type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10], type_sequence_label_size=2, positive_weight=10.0, num_aggregation_labels=4, num_labels=2, aggregation_loss_importance=0.8, use_answer_as_supervision=True, answer_loss_importance=0.001, use_normalized_answer_loss=False, huber_loss_delta=25.0, temperature=1.0, agg_temperature=1.0, use_gumbel_for_cells=False, use_gumbel_for_agg=False, average_approximation_function="ratio", cell_selection_preference=0.5, answer_loss_cutoff=100, max_num_rows=64, max_num_columns=32, average_logits_per_cell=True, select_one_column=True, allow_empty_column_selection=False, init_cell_selection_weights_to_zero=True, reset_position_index_per_cell=True, disable_per_token_loss=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob 
self.initializer_range = initializer_range self.max_position_embeddings = max_position_embeddings self.type_vocab_sizes = type_vocab_sizes self.type_sequence_label_size = type_sequence_label_size self.positive_weight = positive_weight self.num_aggregation_labels = num_aggregation_labels self.num_labels = num_labels self.aggregation_loss_importance = aggregation_loss_importance self.use_answer_as_supervision = use_answer_as_supervision self.answer_loss_importance = answer_loss_importance self.use_normalized_answer_loss = use_normalized_answer_loss self.huber_loss_delta = huber_loss_delta self.temperature = temperature self.agg_temperature = agg_temperature self.use_gumbel_for_cells = use_gumbel_for_cells self.use_gumbel_for_agg = use_gumbel_for_agg self.average_approximation_function = average_approximation_function self.cell_selection_preference = cell_selection_preference self.answer_loss_cutoff = answer_loss_cutoff self.max_num_rows = max_num_rows self.max_num_columns = max_num_columns self.average_logits_per_cell = average_logits_per_cell self.select_one_column = select_one_column self.allow_empty_column_selection = allow_empty_column_selection self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero self.reset_position_index_per_cell = reset_position_index_per_cell self.disable_per_token_loss = disable_per_token_loss self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = [] for type_vocab_size in self.type_vocab_sizes: token_type_ids.append(ids_tensor(shape=[self.batch_size, self.seq_length], vocab_size=type_vocab_size)) token_type_ids = tf.stack(token_type_ids, axis=2) sequence_labels = None token_labels = None labels = None numeric_values = None numeric_values_scale = None float_answer = None aggregation_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) labels = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) numeric_values = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) numeric_values_scale = ids_tensor([self.batch_size, self.seq_length], vocab_size=2, dtype=tf.float32) float_answer = ids_tensor([self.batch_size], vocab_size=2, dtype=tf.float32) aggregation_labels = ids_tensor([self.batch_size], self.num_aggregation_labels) config = self.get_config() return ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) def get_config(self): return TapasConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_sizes=self.type_vocab_sizes, initializer_range=self.initializer_range, positive_weight=self.positive_weight, num_aggregation_labels=self.num_aggregation_labels, num_labels=self.num_labels, aggregation_loss_importance=self.aggregation_loss_importance, use_answer_as_supervision=self.use_answer_as_supervision, 
answer_loss_importance=self.answer_loss_importance, use_normalized_answer_loss=self.use_normalized_answer_loss, huber_loss_delta=self.huber_loss_delta, temperature=self.temperature, agg_temperature=self.agg_temperature, use_gumbel_for_cells=self.use_gumbel_for_cells, use_gumbel_for_agg=self.use_gumbel_for_agg, average_approximation_function=self.average_approximation_function, cell_selection_preference=self.cell_selection_preference, answer_loss_cutoff=self.answer_loss_cutoff, max_num_rows=self.max_num_rows, max_num_columns=self.max_num_columns, average_logits_per_cell=self.average_logits_per_cell, select_one_column=self.select_one_column, allow_empty_column_selection=self.allow_empty_column_selection, init_cell_selection_weights_to_zero=self.init_cell_selection_weights_to_zero, reset_position_index_per_cell=self.reset_position_index_per_cell, disable_per_token_loss=self.disable_per_token_loss, ) def create_and_check_model( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) inputs.pop("attention_mask") result = model(inputs) inputs.pop("token_type_ids") result = model(inputs) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): model = TFTapasForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": token_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): config.num_labels = self.num_labels model = TFTapasForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "labels": sequence_labels, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_question_answering( self, config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ): # inference: without aggregation head (SQA). Model only returns logits sqa_config = copy.copy(config) sqa_config.num_aggregation_labels = 0 sqa_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # inference: with aggregation head (WTQ, WikiSQL-supervised). 
Model returns logits and aggregation logits model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # training: can happen in 3 main ways # case 1: conversational (SQA) model = TFTapasForQuestionAnswering(config=sqa_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # case 2: weak supervision for aggregation (WTQ) model = TFTapasForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "numeric_values": numeric_values, "numeric_values_scale": numeric_values_scale, "float_answer": float_answer, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) # case 3: strong supervision for aggregation (WikiSQL-supervised) wikisql_config = copy.copy(config) wikisql_config.use_answer_as_supervision = False model = TFTapasForQuestionAnswering(config=wikisql_config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, "labels": labels, "aggregation_labels": aggregation_labels, } result = model(inputs) self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, token_type_ids, sequence_labels, token_labels, labels, numeric_values, numeric_values_scale, float_answer, aggregation_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tensorflow_probability @require_tf class TFTapasModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFTapasModel, TFTapasForMaskedLM, TFTapasForSequenceClassification, TFTapasForQuestionAnswering, ) if is_tf_available() else () ) pipeline_model_mapping = ( { "feature-extraction": TFTapasModel, "fill-mask": TFTapasForMaskedLM, "text-classification": TFTapasForSequenceClassification, "zero-shot": TFTapasForSequenceClassification, } if is_tf_available() else {} ) test_head_masking = False test_onnx = False # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ): return True def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if 
return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) inputs_dict["aggregation_labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["numeric_values"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["numeric_values_scale"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.float32 ) inputs_dict["float_answer"] = tf.zeros(self.model_tester.batch_size, dtype=tf.float32) elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict def setUp(self): self.model_tester = TFTapasModelTester(self) self.config_tester = ConfigTester(self, config_class=TapasConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_dataset_conversion(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_keras_fit(self): pass @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") def test_loss_computation(self): pass def prepare_tapas_single_inputs_for_inference(): # Here we prepare a single table-question pair to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], } queries = "Which footballer is 33 years old?" 
table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_inference(): # Here we prepare a batch of 2 table-question pairs to test TAPAS inference on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "How many goals does Ronaldo have?"] table = pd.DataFrame.from_dict(data) return table, queries def prepare_tapas_batch_inputs_for_training(): # Here we prepare a DIFFERENT batch of 2 table-question pairs to test TAPAS training on: data = { "Footballer": ["Lionel Messi", "Cristiano Ronaldo"], "Age": ["33", "35"], "Number of goals": ["712", "750"], } queries = ["Which footballer is 33 years old?", "What's the total number of goals?"] table = pd.DataFrame.from_dict(data) answer_coordinates = [[(0, 0)], [(0, 2), (1, 2)]] answer_text = [["Lionel Messi"], ["1462"]] float_answer = [float("NaN"), float("1462")] return table, queries, answer_coordinates, answer_text, float_answer @require_tensorflow_probability @require_tf class TFTapasModelIntegrationTest(unittest.TestCase): @cached_property def default_tokenizer(self): return TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") @slow def test_inference_no_head(self): # ideally we want to test this with the weights of tapas_inter_masklm_base_reset, # but since it's not straightforward to do this with the TF 1 implementation, we test it with # the weights of the WTQ base model (i.e. tapas_wtq_wikisql_sqa_inter_masklm_base_reset) model = TFTapasModel.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the sequence output expected_slice = tf.constant( [ [ [-0.141581565, -0.599805772, 0.747186482], [-0.143664181, -0.602008104, 0.749218345], [-0.15169853, -0.603363097, 0.741370678], ] ] ) tf.debugging.assert_near(outputs.last_hidden_state[:, :3, :3], expected_slice, atol=0.0005) # test the pooled output expected_slice = tf.constant([[0.987518311, -0.970520139, -0.994303405]]) tf.debugging.assert_near(outputs.pooler_output[:, :3], expected_slice, atol=0.0005) @unittest.skip(reason="Model not available yet") def test_inference_masked_lm(self): pass # TapasForQuestionAnswering has 3 possible ways of being fine-tuned: # - conversational set-up (SQA) # - weak supervision for aggregation (WTQ, WikiSQL) # - strong supervision for aggregation (WikiSQL-supervised) # We test all of them: @slow def test_inference_question_answering_head_conversational(self): # note that google/tapas-base-finetuned-sqa should correspond to tapas_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -9997.274, -16.262585, -10004.089, 15.435196, 15.435196, 15.435196, -9990.443, -16.327433, -16.327433, -16.327433, -16.327433, -16.327433, -10004.84, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.015) @slow def 
test_inference_question_answering_head_conversational_absolute_embeddings(self): # note that google/tapas-small-finetuned-sqa should correspond to tapas_sqa_inter_masklm_small_reset # however here we test the version with absolute position embeddings model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-small-finetuned-sqa") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -10000.041, -18.369339, -10014.692, 17.730324, 17.730324, 17.730324, -9984.974, -18.322773, -18.322773, -18.322773, -18.322773, -18.322773, -10007.267, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.01) @slow def test_inference_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer # let's test on a batch table, queries = prepare_tapas_batch_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([2, 28]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [-160.375504, -160.375504, -160.375504, -10072.3965, -10070.9414, -10094.9736], [-9861.6123, -9861.6123, -9861.6123, -9861.6123, -9891.01172, 146.600677], ] ) tf.debugging.assert_near(logits[:, -6:], expected_slice, atol=0.4) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant( [[18.8545208, -9.76614857, -6.3128891, -2.93525243], [-4.05782509, 40.0351, -5.35329962, 23.3978653]] ) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.001) # test the predicted answer coordinates and aggregation indices EXPECTED_PREDICTED_ANSWER_COORDINATES = [[(0, 0)], [(1, 2)]] EXPECTED_PREDICTED_AGGREGATION_INDICES = [0, 1] predicted_answer_coordinates, predicted_aggregation_indices = tokenizer.convert_logits_to_predictions( inputs, outputs.logits, outputs.logits_aggregation ) tf.debugging.assert_equal(EXPECTED_PREDICTED_ANSWER_COORDINATES, predicted_answer_coordinates) tf.debugging.assert_equal(EXPECTED_PREDICTED_AGGREGATION_INDICES, predicted_aggregation_indices) @slow def test_training_question_answering_head_weak_supervision(self): # note that google/tapas-base-finetuned-wtq should correspond to tapas_wtq_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") tokenizer = self.default_tokenizer # let's test on a batch table, queries, answer_coordinates, answer_text, float_answer = prepare_tapas_batch_inputs_for_training() inputs = tokenizer( table=table, queries=queries, answer_coordinates=answer_coordinates, answer_text=answer_text, padding="longest", return_tensors="tf", ) # the answer should be prepared by the user float_answer = tf.constant(float_answer, dtype=tf.float32) outputs = model( 
input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], labels=inputs["labels"], numeric_values=inputs["numeric_values"], numeric_values_scale=inputs["numeric_values_scale"], float_answer=float_answer, ) # test the loss loss = outputs.loss expected_loss = tf.constant(3.3527612686157227e-08) tf.debugging.assert_near(loss, expected_loss, atol=1e-6) # test the logits on the first example logits = outputs.logits expected_shape = tf.TensorShape([2, 29]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ -160.0156, -160.0156, -160.0156, -160.0156, -160.0156, -10072.2266, -10070.8896, -10092.6006, -10092.6006, ] ) tf.debugging.assert_near(logits[0, -9:], expected_slice, atol=1e-6) # test the aggregation logits on the second example logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([2, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([-4.0538, 40.0304, -5.3554, 23.3965]) tf.debugging.assert_near(logits_aggregation[1, -4:], expected_tensor, atol=1e-4) @slow def test_inference_question_answering_head_strong_supervision(self): # note that google/tapas-base-finetuned-wikisql-supervised should correspond to tapas_wikisql_sqa_inter_masklm_base_reset model = TFTapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wikisql-supervised") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = tf.TensorShape([1, 21]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant( [ [ -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -10011.1084, -18.6185989, -10008.7969, 17.6355762, 17.6355762, 17.6355762, -10002.4404, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -18.7111301, -10007.0977, ] ] ) tf.debugging.assert_near(logits, expected_slice, atol=0.02) # test the aggregation logits logits_aggregation = outputs.logits_aggregation expected_shape = tf.TensorShape([1, 4]) tf.debugging.assert_equal(logits_aggregation.shape, expected_shape) expected_tensor = tf.constant([[16.5659733, -3.06624889, -2.34152961, -0.970244825]]) tf.debugging.assert_near(logits_aggregation, expected_tensor, atol=0.003) @slow def test_inference_classification_head(self): # note that google/tapas-base-finetuned-tabfact should correspond to tapas_tabfact_inter_masklm_base_reset model = TFTapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") tokenizer = self.default_tokenizer table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="tf") outputs = model(**inputs) # test the classification logits logits = outputs.logits expected_shape = tf.TensorShape([1, 2]) tf.debugging.assert_equal(logits.shape, expected_shape) expected_slice = tf.constant([[0.795137286, 9.5572]]) tf.debugging.assert_near(logits, expected_slice, atol=0.05) # Below: tests for Tapas utilities which are defined in modeling_tf_tapas.py. # These are based on segmented_tensor_test.py of the original implementation. 
# URL: https://github.com/google-research/tapas/blob/master/tapas/models/segmented_tensor_test.py @require_tensorflow_probability class TFTapasUtilsTest(unittest.TestCase): def _prepare_tables(self): """Prepares two tables, both with three distinct rows. The first table has two columns: 1.0, 2.0 | 3.0 2.0, 0.0 | 1.0 1.0, 3.0 | 4.0 The second table has three columns: 1.0 | 2.0 | 3.0 2.0 | 0.0 | 1.0 1.0 | 3.0 | 4.0 Returns: SegmentedTensors with the tables. """ values = tf.constant( [ [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]], ] ) row_index = IndexMap( indices=[ [[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 0, 0], [1, 1, 1], [2, 2, 2]], ], num_segments=3, batch_dims=1, ) col_index = IndexMap( indices=[ [[0, 0, 1], [0, 0, 1], [0, 0, 1]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]], ], num_segments=3, batch_dims=1, ) return values, row_index, col_index def test_product_index(self): _, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_index_proj = cell_index.project_outer(cell_index) col_index_proj = cell_index.project_inner(cell_index) ind = cell_index.indices self.assertEqual(cell_index.num_segments, 9) # Projections should give back the original indices. # we use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(row_index.indices.numpy(), row_index_proj.indices.numpy()) self.assertEqual(row_index.num_segments, row_index_proj.num_segments) self.assertEqual(row_index.batch_dims, row_index_proj.batch_dims) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(col_index.indices.numpy(), col_index_proj.indices.numpy()) self.assertEqual(col_index.batch_dims, col_index_proj.batch_dims) # The first and second "column" are identified in the first table. for i in range(3): self.assertEqual(ind[0, i, 0], ind[0, i, 1]) self.assertNotEqual(ind[0, i, 0], ind[0, i, 2]) # All rows are distinct in the first table. for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 and j != j_2: self.assertNotEqual(ind[0, i, j], ind[0, i_2, j_2]) # All cells are distinct in the second table. 
for i, i_2 in zip(range(3), range(3)): for j, j_2 in zip(range(3), range(3)): if i != i_2 or j != j_2: self.assertNotEqual(ind[1, i, j], ind[1, i_2, j_2]) def test_flatten(self): _, row_index, col_index = self._prepare_tables() row_index_flat = flatten(row_index) col_index_flat = flatten(col_index) shape = [3, 4, 5] batched_index = IndexMap(indices=tf.zeros(shape, dtype=tf.int32), num_segments=1, batch_dims=3) batched_index_flat = flatten(batched_index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal( row_index_flat.indices.numpy(), [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5] ) np.testing.assert_array_equal( col_index_flat.indices.numpy(), [0, 0, 1, 0, 0, 1, 0, 0, 1, 3, 4, 5, 3, 4, 5, 3, 4, 5] ) self.assertEqual(batched_index_flat.num_segments.numpy(), np.prod(shape)) np.testing.assert_array_equal(batched_index_flat.indices.numpy(), range(np.prod(shape))) def test_range_index_map(self): batch_shape = [3, 4] num_segments = 5 index = range_index_map(batch_shape, num_segments) self.assertEqual(num_segments, index.num_segments) self.assertEqual(2, index.batch_dims) indices = index.indices # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(list(indices.shape), [3, 4, 5]) for i in range(batch_shape[0]): for j in range(batch_shape[1]): # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(indices[i, j, :].numpy(), range(num_segments)) def test_reduce_sum(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_sum, _ = reduce_sum(values, row_index) col_sum, _ = reduce_sum(values, col_index) cell_sum, _ = reduce_sum(values, cell_index) # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose np.testing.assert_allclose(row_sum.numpy(), [[6.0, 3.0, 8.0], [6.0, 3.0, 8.0]]) np.testing.assert_allclose(col_sum.numpy(), [[9.0, 8.0, 0.0], [4.0, 5.0, 8.0]]) np.testing.assert_allclose( cell_sum.numpy(), [[3.0, 3.0, 0.0, 2.0, 1.0, 0.0, 4.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0]], ) def test_reduce_mean(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) row_mean, _ = reduce_mean(values, row_index) col_mean, _ = reduce_mean(values, col_index) cell_mean, _ = reduce_mean(values, cell_index) # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose np.testing.assert_allclose( row_mean.numpy(), [[6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0], [6.0 / 3.0, 3.0 / 3.0, 8.0 / 3.0]] ) np.testing.assert_allclose(col_mean.numpy(), [[9.0 / 6.0, 8.0 / 3.0, 0.0], [4.0 / 3.0, 5.0 / 3.0, 8.0 / 3.0]]) np.testing.assert_allclose( cell_mean.numpy(), [ [3.0 / 2.0, 3.0, 0.0, 2.0 / 2.0, 1.0, 0.0, 4.0 / 2.0, 4.0, 0.0], [1.0, 2.0, 3.0, 2.0, 0.0, 1.0, 1.0, 3.0, 4.0], ], ) def test_reduce_max(self): values = tf.convert_to_tensor([2.0, 1.0, 0.0, 3.0]) index = IndexMap(indices=tf.convert_to_tensor([0, 1, 0, 1]), num_segments=2) maximum, _ = reduce_max(values, index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(maximum.numpy(), [2, 3]) def test_reduce_sum_vectorized(self): values = tf.convert_to_tensor([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0], [3.0, 4.0, 5.0]]) index = IndexMap(indices=tf.convert_to_tensor([0, 0, 1]), num_segments=2, batch_dims=0) sums, new_index = reduce_sum(values, index) # We use np.testing.assert_allclose rather than 
Tensorflow's assertAllClose np.testing.assert_allclose(sums.numpy(), [[3.0, 5.0, 7.0], [3.0, 4.0, 5.0]]) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(new_index.indices.numpy(), [0, 1]) np.testing.assert_array_equal(new_index.num_segments.numpy(), 2) np.testing.assert_array_equal(new_index.batch_dims, 0) def test_gather(self): values, row_index, col_index = self._prepare_tables() cell_index = ProductIndexMap(row_index, col_index) # Compute sums and then gather. The result should have the same shape as # the original table and each element should contain the sum of the values in # its cell. sums, _ = reduce_sum(values, cell_index) cell_sum = gather(sums, cell_index) assert cell_sum.shape == values.shape # We use np.testing.assert_allclose rather than Tensorflow's assertAllClose np.testing.assert_allclose( cell_sum.numpy(), [[[3.0, 3.0, 3.0], [2.0, 2.0, 1.0], [4.0, 4.0, 4.0]], [[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]]], ) def test_gather_vectorized(self): values = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) index = IndexMap(indices=tf.convert_to_tensor([[0, 1], [1, 0]]), num_segments=2, batch_dims=1) result = gather(values, index) # We use np.testing.assert_array_equal rather than Tensorflow's assertAllEqual np.testing.assert_array_equal(result.numpy(), [[[1, 2], [3, 4]], [[7, 8], [5, 6]]])
transformers/tests/models/tapas/test_modeling_tf_tapas.py/0
{ "file_path": "transformers/tests/models/tapas/test_modeling_tf_tapas.py", "repo_id": "transformers", "token_count": 21105 }
370
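A minimal NumPy sketch of the segment reduction the reduce_sum tests above expect: every value is scattered into the segment named by its index map and accumulated. This is illustrative only, with plain np.add.at standing in for the library's IndexMap machinery; it is not part of the test file above.

import numpy as np

# First table from _prepare_tables and its column index map (two real columns).
values = np.array([[1.0, 2.0, 3.0], [2.0, 0.0, 1.0], [1.0, 3.0, 4.0]])
col_index = np.array([[0, 0, 1], [0, 0, 1], [0, 0, 1]])

# Scatter-add each element into the segment named by its index.
col_sum = np.zeros(3)
np.add.at(col_sum, col_index.ravel(), values.ravel())
print(col_sum)  # [9. 8. 0.] -- matches the expected col_sum row for the first table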
# coding=utf-8 # Copyright 2023 The Intel Team Authors, The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.image_transforms import PaddingMode from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import TvpImageProcessor class TvpImageProcessingTester(unittest.TestCase): def __init__( self, parent, do_resize: bool = True, size: Dict[str, int] = {"longest_edge": 40}, do_center_crop: bool = False, crop_size: Dict[str, int] = None, do_rescale: bool = False, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: Dict[str, int] = {"height": 80, "width": 80}, fill: int = None, pad_mode: PaddingMode = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073], image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711], batch_size=2, min_resolution=40, max_resolution=80, num_channels=3, num_frames=2, ): self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad self.pad_size = pad_size self.fill = fill self.pad_mode = pad_mode self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.num_frames = num_frames def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "do_center_crop": self.do_center_crop, "do_pad": self.do_pad, "pad_size": self.pad_size, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to TvpImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: return (int(self.pad_size["height"]), int(self.pad_size["width"])) else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class TvpImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = TvpImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = TvpImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_rescale")) self.assertTrue(hasattr(image_processing, "do_pad")) self.assertTrue(hasattr(image_processing, "pad_size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"longest_edge": 40}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size={"longest_edge": 12}) self.assertEqual(image_processor.size, {"longest_edge": 12}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL videos video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for 
video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs) encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) # Test batched expected_height, expected_width = self.image_processor_tester.get_expected_values(video_inputs, batched=True) encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_videos.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, expected_height, expected_width, ), )
transformers/tests/models/tvp/test_image_processing_tvp.py/0
{ "file_path": "transformers/tests/models/tvp/test_image_processing_tvp.py", "repo_id": "transformers", "token_count": 5453 }
371
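A small sketch of why get_expected_values above always returns the pad_size (80, 80): frames are resized so the longest edge is at most 40 and then zero-padded to 80 x 80. pad_to below is a hypothetical helper written only for illustration, not the TvpImageProcessor API.

import numpy as np

def pad_to(frame: np.ndarray, height: int, width: int) -> np.ndarray:
    # Zero-pad a (channels, H, W) array on the bottom/right to (channels, height, width).
    c, h, w = frame.shape
    out = np.zeros((c, height, width), dtype=frame.dtype)
    out[:, :h, :w] = frame
    return out

frame = np.random.rand(3, 30, 40)   # longest edge already <= 40
print(pad_to(frame, 80, 80).shape)  # (3, 80, 80) -- the height/width every call above expects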
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch UperNet framework. """ import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UperNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 1, 1], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_stages = num_stages self.hidden_sizes = hidden_sizes self.depths = depths self.is_training = is_training self.use_labels = use_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.out_features = out_features self.num_labels = num_labels self.scope = scope self.num_hidden_layers = num_stages def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_backbone_config(self): return ConvNextConfig( num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, ) def get_config(self): return UperNetConfig( backbone_config=self.get_backbone_config(), backbone=None, hidden_size=64, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=32, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, ) def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels): model = UperNetForSemanticSegmentation(config=config) model.to(torch_device) model.eval() result = model(pixel_values) 
self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as UperNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else () pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = UperNetModelTester(self) self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_for_semantic_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) @unittest.skip(reason="UperNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="UperNet does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="UperNet does not have a base model") def test_save_load_fast_init_from_base(self): pass @unittest.skip(reason="UperNet does not have a base model") def test_save_load_fast_init_to_base(self): pass @require_torch_multi_gpu @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`") def test_multi_gpu_data_parallel_forward(self): pass def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_stages = self.model_tester.num_stages self.assertEqual(len(hidden_states), expected_num_stages + 1) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def 
test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="UperNet does not have tied weights") def test_tied_model_weights_key_ignore(self): pass @slow def test_model_from_pretrained(self): for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = UperNetForSemanticSegmentation.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of ADE20k def prepare_img(): filepath = hf_hub_download( repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg" ) image = Image.open(filepath).convert("RGB") return image @require_torch @require_vision @slow class UperNetModelIntegrationTest(unittest.TestCase): def test_inference_swin_backbone(self): processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny") model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)) def test_inference_convnext_backbone(self): processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny") model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device) image = prepare_img() inputs = processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape = torch.Size((1, model.config.num_labels, 512, 512)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
transformers/tests/models/upernet/test_modeling_upernet.py/0
{ "file_path": "transformers/tests/models/upernet/test_modeling_upernet.py", "repo_id": "transformers", "token_count": 4945 }
372
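A short sketch of the shape bookkeeping behind the UperNet hidden-states test above: with num_stages=4 the test expects num_stages + 1 hidden states, and the first ConvNext feature map is asserted to be one quarter of the input resolution. The numbers below simply restate the tester's defaults.

num_stages = 4
image_size = 32

expected_num_hidden_states = num_stages + 1  # the test asserts len(hidden_states) == num_stages + 1
first_feature_map_size = image_size // 4     # hidden_states[0] is (image_size // 4, image_size // 4)

print(expected_num_hidden_states, first_feature_map_size)  # 5 8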
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VisionTextDualEncoder model. """ import collections import tempfile import unittest import numpy as np from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..clip.test_modeling_clip import CLIPVisionModelTester from ..deit.test_modeling_deit import DeiTModelTester from ..roberta.test_modeling_roberta import RobertaModelTester from ..vit.test_modeling_vit import ViTModelTester if is_torch_available(): import torch from transformers import ( BertModel, CLIPVisionModel, DeiTModel, RobertaModel, VisionTextDualEncoderConfig, VisionTextDualEncoderModel, ViTModel, ) if is_flax_available(): from transformers import FlaxVisionTextDualEncoderModel from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor # Inspired by # https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py # From PyTorch internals def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_torch class VisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = VisionTextDualEncoderModel(config) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_model( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, 
text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = VisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() with torch.no_grad(): output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = VisionTextDualEncoderModel.from_pretrained(tmpdirname).eval() model.to(torch_device) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0].cpu().numpy() max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-5) def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_equivalence(self, pt_model, fx_model, input_ids, attention_mask, pixel_values, **kwargs): pt_model.to(torch_device) pt_model.eval() # prepare inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values} pt_inputs = inputs_dict flax_inputs = {k: v.numpy() for k, v in pt_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], 
pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2) def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def test_vision_text_dual_encoder_model(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() vision_config = config_inputs_dict.pop("vision_config") text_config = config_inputs_dict.pop("text_config") inputs_dict = config_inputs_dict self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict) self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = VisionTextDualEncoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) 
after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class ViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = ViTModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = ViTModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_torch class DeiTRobertaModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-deit", "hf-internal-testing/tiny-random-roberta" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = 
output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def get_vision_text_model(self, vision_config, text_config): vision_model = DeiTModel(vision_config).eval() text_model = RobertaModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = DeiTModelTester(self) bert_model_tester = RobertaModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } # skip as DeiT is not available in Flax def test_pt_flax_equivalence(self): pass @require_torch class CLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = CLIPVisionModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = CLIPVisionModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_torch class VisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="pt" ) outputs = model(**inputs) # verify the logits self.assertEqual(outputs.logits_per_image.shape, 
(inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = torch.tensor([[1.2284727, 0.3104122]]) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
transformers/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py/0
{ "file_path": "transformers/tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", "repo_id": "transformers", "token_count": 9347 }
373
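A brief sketch of the attention sequence-length arithmetic used by check_vision_text_output_attention above, assuming square images and patches: ViT prepends one [CLS] token to the patch sequence, while DeiT prepends a [CLS] and a distillation token. The 224/16 values are illustrative, not taken from the testers.

def vision_seq_len(image_size: int, patch_size: int, extra_tokens: int) -> int:
    num_patches = (image_size // patch_size) ** 2
    return num_patches + extra_tokens

print(vision_seq_len(224, 16, extra_tokens=1))  # 197 -> ViT: patches + [CLS]
print(vision_seq_len(224, 16, extra_tokens=2))  # 198 -> DeiT: patches + [CLS] + distillation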
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TensorFlow Whisper model. """ from __future__ import annotations import inspect import tempfile import traceback import unittest import numpy as np from transformers import WhisperConfig, WhisperFeatureExtractor, WhisperProcessor from transformers.testing_utils import is_tf_available, require_tf, require_tokenizers, run_test_in_subprocess, slow from transformers.utils import cached_property from transformers.utils.import_utils import is_datasets_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import load_dataset if is_tf_available(): import tensorflow as tf from transformers import TFWhisperForConditionalGeneration, TFWhisperModel, set_seed from transformers.models.whisper.modeling_tf_whisper import ( TFWhisperDecoder, TFWhisperEncoder, sinusoidal_embedding_init, ) def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = tf.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class TFWhisperModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=60, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob 
self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict): model = TFWhisperModel(config=config) input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFWhisperModel(config=config).get_decoder() # take a slice so we're shorter than the seqeuence length and can append later input_ids = inputs_dict["decoder_input_ids"][:, :-10] attention_mask = inputs_dict["decoder_attention_mask"][:, :-10] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_token = ids_tensor((self.batch_size, 3), config.vocab_size) next_tokens = tf.where(next_token <= 2, 2, next_token) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, 
past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = np.random.randint(0, output_from_past.shape[-1]) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(np.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = TFWhisperModel(config=config) outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = TFWhisperEncoder.from_pretrained(tmpdirname) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = TFWhisperDecoder.from_pretrained(tmpdirname) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max() < 1e-3) @require_tf class TFWhisperModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (TFWhisperModel, TFWhisperForConditionalGeneration) if is_tf_available() else () all_generative_model_classes = (TFWhisperForConditionalGeneration,) if is_tf_available() else () pipeline_model_mapping = {"feature-extraction": TFWhisperModel} if is_tf_available() else {} is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False test_onnx = False input_name = "input_features" # TODO (ydshieh): undo skip once a fix is done on TF side. 
@unittest.skip("Skip for now as TF 2.13 breaks it on GPU") def test_xla_generate_slow(self): super().test_xla_generate_slow() def setUp(self): self.model_tester = TFWhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) model.build_in_name_scope() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=False) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_requires_grad_encoder_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) encoder = model.get_encoder() self.assertFalse(encoder.embed_positions.trainable) def test_encoder_sinusoidal_embed_positions(self): config = self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.build_in_name_scope() embeds = model.get_encoder().embed_positions.get_weights()[0] sinusoids = sinusoidal_embedding_init(embeds.shape).numpy() self.assertTrue(np.allclose(embeds, sinusoids)) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 3 input_ids = input_ids[:max_batch_size, :, :] # generate max 3 tokens max_length = 4 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length # not implemented currently def test_inputs_embeds(self): pass @unittest.skip("Training is not yet supported") def test_training(self): pass def test_generate_with_head_masking(self): pass @unittest.skip("fp16 is not yet supported for TF models") def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = TFWhisperForConditionalGeneration(config) model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["decoder_position_ids", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) 
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None): # We override with a slightly higher tol value, as test recently became flaky super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", encoder_key_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), 
[self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = tf.zeros_like(input_ids[:, :1], dtype=tf.int64) + tf.convert_to_tensor( [model._get_decoder_start_token_id()] ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, 
max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) # iterate over all generative models for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_features with self.assertRaises(AssertionError): model.generate(do_sample=True, max_length=5) # num_return_sequences = 1 self._check_generated_ids(model.generate(input_features, do_sample=True)) with self.assertRaises(ValueError): # generating multiple sequences when no beam search generation # is not allowed as it would always generate the same sequences model.generate(input_features, do_sample=False, num_return_sequences=2) # num_return_sequences > 1, sample self._check_generated_ids(model.generate(input_features, do_sample=True, num_return_sequences=2)) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is # `input_features` def test_lm_head_model_random_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_features = inputs_dict.get("input_features", None) for model_class in self.all_generative_model_classes: model = model_class(config) if config.bos_token_id is None: # if bos token id is not defined model needs input_ids, num_return_sequences = 1 self._check_generated_ids(model.generate(input_features, do_sample=True, num_beams=2)) with self.assertRaises(ValueError): # generating more sequences than having beams leads is not possible model.generate(input_features, do_sample=False, num_return_sequences=3, num_beams=2) # num_return_sequences > 1, sample self._check_generated_ids( model.generate( input_features, do_sample=True, num_beams=2, num_return_sequences=2, ) ) # num_return_sequences > 1, greedy self._check_generated_ids( model.generate(input_features, do_sample=False, num_beams=2, num_return_sequences=2) ) # check bad words tokens language generation # create list of 1-seq bad token and list of 2-seq of bad tokens bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] output_tokens = model.generate( input_features, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 ) # only count generated tokens generated_ids = output_tokens[:, input_features.shape[-1] :] self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) def test_generate_with_prompt_ids_and_task_and_language(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] 
prompt_ids = np.arange(5) language = "<|de|>" task = "translate" lang_id = 6 task_id = 7 model.generation_config.__setattr__("lang_to_id", {language: lang_id}) model.generation_config.__setattr__("task_to_id", {task: task_id}) output = model.generate(input_features, max_new_tokens=5, task=task, language=language, prompt_ids=prompt_ids) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, lang_id, task_id, ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def test_generate_with_prompt_ids_and_forced_decoder_ids(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = TFWhisperForConditionalGeneration(config) input_features = input_dict["input_features"] prompt_ids = np.asarray(range(5)) forced_decoder_ids = [(1, 6), (2, 7), (3, 8)] output = model.generate( input_features, max_new_tokens=5, forced_decoder_ids=forced_decoder_ids, prompt_ids=prompt_ids ) expected_output_start = [ *prompt_ids.tolist(), model.generation_config.decoder_start_token_id, *[token for _rank, token in forced_decoder_ids], ] for row in output.numpy().tolist(): self.assertListEqual(row[: len(expected_output_start)], expected_output_start) def _load_datasamples(num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _test_large_logits_librispeech(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="tf" ) input_features = processed_inputs.input_features decoder_input_ids = processed_inputs.labels logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on unittest.TestCase().assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. 
Quilter is the apostle of the middle classes and we are glad" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_generation_multilingual(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" unittest.TestCase().assertEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() def _test_large_batched_generation(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = _load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids_1 = model.generate(input_features[0:2], max_length=20) generated_ids_2 = model.generate(input_features[2:4], max_length=20) generated_ids = np.concatenate([generated_ids_1, generated_ids_2]) # fmt: off EXPECTED_IDS = [ [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] ] # fmt: on unittest.TestCase().assertEqual(generated_ids.tolist(), EXPECTED_IDS) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all," ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) unittest.TestCase().assertListEqual(transcript, EXPECTED_TRANSCRIPT) except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() @require_tf @require_tokenizers class TFWhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): return _load_datasamples(num_samples) @slow def test_tiny_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) # fmt: off EXPECTED_GENERATION = tf.convert_to_tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) # fmt: on head_logits = logits[0] @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) self.assertTrue(np.allclose(head_logits[0, 0, :30], EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) model = TFWhisperModel.from_pretrained("openai/whisper-small.en") input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="tf").input_features logits = model( input_features, decoder_input_ids=tf.convert_to_tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): run_test_in_subprocess(test_case=self, target_func=_test_large_logits_librispeech, inputs=None) @slow def test_tiny_en_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.config.decoder_start_token_id = 
50257 input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, num_beams=5, max_length=20) generated_ids_xla = xla_generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) transcript_xla = processor.tokenizer.decode(generated_ids_xla[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. 
Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) self.assertEqual(transcript_xla, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation, inputs=None) @slow def test_large_generation_multilingual(self): run_test_in_subprocess(test_case=self, target_func=_test_large_generation_multilingual, inputs=None) @slow def test_large_batched_generation(self): run_test_in_subprocess(test_case=self, target_func=_test_large_batched_generation, inputs=None) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features generated_ids = model.generate(input_features, max_length=20) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_xla_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features xla_generate = tf.function(model.generate, jit_compile=True) generated_ids = model.generate(input_features, max_length=20) generated_ids_xla = xla_generate(input_features, max_length=20) # fmt: off EXPECTED_LOGITS = tf.convert_to_tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) self.assertTrue(np.allclose(generated_ids_xla, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) transcript_xla = processor.batch_decode(generated_ids_xla, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) self.assertListEqual(transcript_xla, EXPECTED_TRANSCRIPT)
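# Illustrative sketch (not part of the original test suite): the feature-extraction ->
# generate -> decode pattern that the integration tests above exercise, pulled out into a
# standalone helper. The "openai/whisper-tiny" checkpoint, the 16 kHz `audio_array`
# argument, and the max_length cap are assumptions for illustration; the classes and calls
# are the same ones already used in this module.
def _example_transcribe_with_tf_whisper(audio_array, checkpoint="openai/whisper-tiny"):
    processor = WhisperProcessor.from_pretrained(checkpoint)
    model = TFWhisperForConditionalGeneration.from_pretrained(checkpoint)
    # Convert the raw waveform into the log-mel input features the encoder expects
    input_features = processor.feature_extractor(raw_speech=audio_array, return_tensors="tf").input_features
    # A short generation budget keeps the sketch cheap; the tests above use the same cap
    generated_ids = model.generate(input_features, max_length=20)
    # Drop special tokens (task/language/timestamp markers) when decoding
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]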
transformers/tests/models/whisper/test_modeling_tf_whisper.py/0
{ "file_path": "transformers/tests/models/whisper/test_modeling_tf_whisper.py", "repo_id": "transformers", "token_count": 21571 }
374
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device if is_torch_available(): import torch from transformers import XLMProphetNetForConditionalGeneration, XLMProphetNetTokenizer @require_torch class XLMProphetNetModelIntegrationTest(unittest.TestCase): @slow def test_pretrained_checkpoint_hidden_states(self): model = XLMProphetNetForConditionalGeneration.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") model.to(torch_device) # encoder-decoder outputs encoder_ids = torch.tensor([[17, 96208, 103471, 2]]).to(torch_device) decoder_prev_ids = torch.tensor( [[2, 250, 9953, 34, 69489, 1620, 32, 118424, 624, 210, 105, 2913, 1032, 351]] ).to(torch_device) output = model( input_ids=encoder_ids, attention_mask=None, encoder_outputs=None, decoder_input_ids=decoder_prev_ids ) output_predited_logis = output[0] expected_shape = torch.Size((1, 14, 250012)) self.assertEqual(output_predited_logis.shape, expected_shape) expected_slice = torch.tensor( [[[-6.3986, -8.2391, 12.5189], [-6.3289, -8.0864, 12.6211], [-6.2418, -8.0445, 12.7968]]] ).to(torch_device) self.assertTrue(torch.allclose(output_predited_logis[:, :3, :3], expected_slice, atol=1e-4)) # encoder outputs encoder_outputs = model.prophetnet.encoder(encoder_ids)[0] expected_encoder_outputs_slice = torch.tensor( [[[-1.4260, -0.7628, 0.8453], [-1.4719, -0.1391, 0.7807], [-1.7678, 0.0114, 0.4646]]] ).to(torch_device) expected_shape_encoder = torch.Size((1, 4, 1024)) self.assertEqual(encoder_outputs.shape, expected_shape_encoder) self.assertTrue(torch.allclose(encoder_outputs[:, :3, :3], expected_encoder_outputs_slice, atol=1e-4)) # decoder outputs decoder_outputs = model.prophetnet.decoder( decoder_prev_ids, encoder_hidden_states=encoder_outputs, ) predicting_streams = decoder_outputs[1].view(1, model.config.ngram, 14, -1) predicting_streams_logits = model.lm_head(predicting_streams) next_first_stream_logits = predicting_streams_logits[:, 0] self.assertTrue(torch.allclose(next_first_stream_logits[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_ntg_hidden_states(self): model = XLMProphetNetForConditionalGeneration.from_pretrained( "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg" ) model.to(torch_device) encoder_ids = torch.tensor([[17, 96208, 103471, 2]]).to(torch_device) decoder_prev_ids = torch.tensor( [[2, 250, 9953, 34, 69489, 1620, 32, 118424, 624, 210, 105, 2913, 1032, 351]] ).to(torch_device) output = model( input_ids=encoder_ids, attention_mask=None, encoder_outputs=None, decoder_input_ids=decoder_prev_ids ) output_predited_logis = output[0] expected_shape = torch.Size((1, 14, 250012)) self.assertEqual(output_predited_logis.shape, expected_shape) # compare the actual values for a slice. 
expected_slice = torch.tensor( [[[-9.2253, -9.7173, -6.3529], [-7.6701, -9.0145, -1.9382], [-8.0195, -7.0004, -0.1523]]] ).to(torch_device) self.assertTrue(torch.allclose(output_predited_logis[:, :3, :3], expected_slice, atol=1e-4)) @slow def test_xprophetnet_ntg_inference(self): model = XLMProphetNetForConditionalGeneration.from_pretrained( "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg" ) model.to(torch_device) model.config.max_length = 512 tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased-xglue-ntg") EN_SENTENCE = ( "Microsoft Corporation intends to officially end free support for the Windows 7 operating system after" " January 14, 2020, according to the official portal of the organization. From that day, users of this" " system will not be able to receive security updates, which could make their computers vulnerable to" " cyber attacks." ) RU_SENTENCE = ( "орпорация Microsoft намерена официально прекратить бесплатную поддержку операционной системы Windows 7" " после 14 января 2020 года, сообщается на официальном портале организации . С указанного дня пользователи" " этой системы не смогут получать обновления безопасности, из-за чего их компьютеры могут стать уязвимыми" " к кибератакам." ) ZH_SENTENCE = "根据该组织的官方门户网站,微软公司打算在2020年1月14日之后正式终止对Windows 7操作系统的免费支持。从那时起,该系统的用户将无法接收安全更新,这可能会使他们的计算机容易受到网络攻击。" input_ids = tokenizer( [EN_SENTENCE, RU_SENTENCE, ZH_SENTENCE], padding=True, max_length=255, return_tensors="pt" ).input_ids input_ids = input_ids.to(torch_device) summary_ids = model.generate( input_ids, num_beams=10, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) generated_titles = [tokenizer.decode(g, skip_special_tokens=True) for g in summary_ids] EXPECTED_TITLE_EN = "Microsoft to end Windows 7 free support after January 14, 2020" EXPECTED_TITLE_RU = "Microsoft намерена прекратить бесплатную поддержку Windows 7 после 14 января 2020 года" EXPECTED_TITLE_ZH = "微软打算终止对Windows 7操作系统的免费支持" self.assertListEqual( [EXPECTED_TITLE_EN, EXPECTED_TITLE_RU, EXPECTED_TITLE_ZH], generated_titles, ) summary_ids_beam1 = model.generate( input_ids, num_beams=1, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True ) generated_titles_beam1_tok = [ tokenizer.convert_ids_to_tokens(g, skip_special_tokens=True) for g in summary_ids_beam1 ] EXPECTED_TITLE_EN_BEAM1_TOK = "▁Microsoft ▁to ▁end ▁free ▁support ▁for ▁Windows ▁7".split(" ") EXPECTED_TITLE_RU_BEAM1_TOK = "▁Microsoft ▁намерен а ▁прекрати ть ▁бес плат ную ▁поддержку ▁Windows ▁7 ▁после ▁14 ▁января ▁2020 ▁года".split( " " ) EXPECTED_TITLE_ZH_BEAM1_TOK = "微软 公司 打算 终止 对 Windows ▁7 操作 系统的 免费 支持".split(" ") self.assertListEqual( [EXPECTED_TITLE_EN_BEAM1_TOK, EXPECTED_TITLE_RU_BEAM1_TOK, EXPECTED_TITLE_ZH_BEAM1_TOK], generated_titles_beam1_tok, )
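# Illustrative sketch (not part of the original tests): the tokenize -> beam-search ->
# decode flow exercised by test_xprophetnet_ntg_inference above, factored into a helper.
# The checkpoint name and generation settings mirror that test; treat them as an example
# configuration rather than a recommendation.
def _example_generate_headline(article_text):
    tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased-xglue-ntg")
    model = XLMProphetNetForConditionalGeneration.from_pretrained(
        "microsoft/xprophetnet-large-wiki100-cased-xglue-ntg"
    ).to(torch_device)
    input_ids = tokenizer([article_text], padding=True, max_length=255, return_tensors="pt").input_ids
    input_ids = input_ids.to(torch_device)
    summary_ids = model.generate(
        input_ids, num_beams=10, length_penalty=1.0, no_repeat_ngram_size=3, early_stopping=True
    )
    # Decode the best beam, skipping special tokens
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)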
transformers/tests/models/xlm_prophetnet/test_modeling_xlm_prophetnet.py/0
{ "file_path": "transformers/tests/models/xlm_prophetnet/test_modeling_xlm_prophetnet.py", "repo_id": "transformers", "token_count": 3737 }
375
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib import unittest from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import AnnotationFormatTestMixin, ImageProcessingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class YolosImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True, ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_pad = do_pad def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def get_expected_values(self, image_inputs, batched=False): """ This function computes the expected height and width when providing images to YolosImageProcessor, assuming do_resize is set to True with a scalar size. 
""" if not batched: image = image_inputs[0] if isinstance(image, Image.Image): width, height = image.size else: height, width = image.shape[1], image.shape[2] size = self.size["shortest_edge"] max_size = self.size.get("longest_edge", None) if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if width < height and width != size: height = int(size * height / width) width = size elif height < width and height != size: width = int(size * width / height) height = size width_mod = width % 16 height_mod = height % 16 expected_width = width - width_mod expected_height = height - height_mod else: expected_values = [] for image in image_inputs: expected_height, expected_width = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) expected_height = max(expected_values, key=lambda item: item[0])[0] expected_width = max(expected_values, key=lambda item: item[1])[1] return expected_height, expected_width def expected_output_image_shape(self, images): height, width = self.get_expected_values(images, batched=True) return self.num_channels, height, width def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_image_inputs( batch_size=self.batch_size, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class YolosImageProcessingTest(AnnotationFormatTestMixin, ImageProcessingTestMixin, unittest.TestCase): image_processing_class = YolosImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = YolosImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333}) self.assertEqual(image_processor.do_pad, True) image_processor = self.image_processing_class.from_dict( self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False ) self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84}) self.assertEqual(image_processor.do_pad, False) def test_equivalence_padding(self): # Initialize image_processings image_processing_1 = self.image_processing_class(**self.image_processor_dict) image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False) # create random PyTorch tensors image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test whether the method "pad" and calling the image processor return the same tensors encoded_images_with_method = 
image_processing_1.pad(image_inputs, return_tensors="pt") encoded_images = image_processing_2(image_inputs, return_tensors="pt") self.assertTrue( torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) ) def test_resize_max_size_respected(self): image_processor = self.image_processing_class(**self.image_processor_dict) # create torch tensors as image image = torch.randint(0, 256, (3, 100, 1500), dtype=torch.uint8) processed_image = image_processor( image, size={"longest_edge": 1333, "shortest_edge": 800}, do_pad=False, return_tensors="pt" )["pixel_values"] self.assertTrue(processed_image.shape[-1] <= 1333) self.assertTrue(processed_image.shape[-2] <= 800) @slow def test_call_pytorch_with_coco_detection_annotations(self): # prepare image and target image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"image_id": 39769, "annotations": target} # encode them image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small") encoding = image_processing(images=image, annotations=target, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1056]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([5832.7256, 11144.6689, 484763.2500, 829269.8125, 146579.4531, 164177.6250]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1056]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): # prepare image, target and masks_path image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them image_processing = YolosImageProcessor(format="coco_panoptic") encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values expected_shape = torch.Size([1, 3, 800, 1056]) self.assertEqual(encoding["pixel_values"].shape, 
expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([146591.5000, 163974.2500, 480092.2500, 11187.0000, 5824.5000, 7562.5000]) self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify masks expected_masks_sum = 815161 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) # verify orig_size expected_orig_size = torch.tensor([480, 640]) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1056]) self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) # Output size is slight different from DETR as yolos takes mod of 16 @slow def test_batched_coco_detection_annotations(self): image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: target = json.loads(f.read()) annotations_0 = {"image_id": 39769, "annotations": target} annotations_1 = {"image_id": 39769, "annotations": target} # Adjust the bounding boxes for the resized image w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotations_1["annotations"])): coords = annotations_1["annotations"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotations_1["annotations"][i]["bbox"] = new_bbox images = [image_0, image_1] annotations = [annotations_0, annotations_1] image_processing = YolosImageProcessor() encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, return_tensors="pt", # do_convert_annotations=True ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1056 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.6879, 0.4609, 0.0755, 0.3691], [0.2118, 0.3359, 0.2601, 0.1566], [0.5011, 0.5000, 0.9979, 1.0000], [0.5010, 0.5020, 0.9979, 0.9959], [0.3284, 0.5944, 0.5884, 0.8112], [0.8394, 0.5445, 0.3213, 0.9110], ] ) expected_boxes_1 = torch.tensor( [ [0.4169, 0.2765, 0.0458, 0.2215], [0.1284, 0.2016, 0.1576, 0.0940], [0.3792, 0.4933, 0.7559, 0.9865], 
[0.3794, 0.5002, 0.7563, 0.9955], [0.1990, 0.5456, 0.3566, 0.8646], [0.5845, 0.4115, 0.3462, 0.7161], ] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3)) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1)) # Output size is slight different from DETR as yolos takes mod of 16 def test_batched_coco_panoptic_annotations(self): # prepare image, target and masks_path image_0 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") image_1 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png").resize((800, 800)) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: target = json.loads(f.read()) annotation_0 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} annotation_1 = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} w_0, h_0 = image_0.size w_1, h_1 = image_1.size for i in range(len(annotation_1["segments_info"])): coords = annotation_1["segments_info"][i]["bbox"] new_bbox = [ coords[0] * w_1 / w_0, coords[1] * h_1 / h_0, coords[2] * w_1 / w_0, coords[3] * h_1 / h_0, ] annotation_1["segments_info"][i]["bbox"] = new_bbox masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") images = [image_0, image_1] annotations = [annotation_0, annotation_1] # encode them image_processing = YolosImageProcessor(format="coco_panoptic") encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, 
return_tensors="pt", return_segmentation_masks=True, ) # Check the pixel values have been padded postprocessed_height, postprocessed_width = 800, 1056 expected_shape = torch.Size([2, 3, postprocessed_height, postprocessed_width]) self.assertEqual(encoding["pixel_values"].shape, expected_shape) # Check the bounding boxes have been adjusted for padded images self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) expected_boxes_0 = torch.tensor( [ [0.2625, 0.5437, 0.4688, 0.8625], [0.7719, 0.4104, 0.4531, 0.7125], [0.5000, 0.4927, 0.9969, 0.9854], [0.1688, 0.2000, 0.2063, 0.0917], [0.5492, 0.2760, 0.0578, 0.2187], [0.4992, 0.4990, 0.9984, 0.9979], ] ) expected_boxes_1 = torch.tensor( [ [0.1591, 0.3262, 0.2841, 0.5175], [0.4678, 0.2463, 0.2746, 0.4275], [0.3030, 0.2956, 0.6042, 0.5913], [0.1023, 0.1200, 0.1250, 0.0550], [0.3329, 0.1656, 0.0350, 0.1312], [0.3026, 0.2994, 0.6051, 0.5987], ] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, atol=1e-3)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, atol=1e-3)) # Check the masks have also been padded self.assertEqual(encoding["labels"][0]["masks"].shape, torch.Size([6, 800, 1056])) self.assertEqual(encoding["labels"][1]["masks"].shape, torch.Size([6, 800, 1056])) # Check if do_convert_annotations=False, then the annotations are not converted to centre_x, centre_y, width, height # format and not in the range [0, 1] encoding = image_processing( images=images, annotations=annotations, masks_path=masks_path, return_segmentation_masks=True, do_convert_annotations=False, return_tensors="pt", ) self.assertEqual(encoding["labels"][0]["boxes"].shape, torch.Size([6, 4])) self.assertEqual(encoding["labels"][1]["boxes"].shape, torch.Size([6, 4])) # Convert to absolute coordinates unnormalized_boxes_0 = torch.vstack( [ expected_boxes_0[:, 0] * postprocessed_width, expected_boxes_0[:, 1] * postprocessed_height, expected_boxes_0[:, 2] * postprocessed_width, expected_boxes_0[:, 3] * postprocessed_height, ] ).T unnormalized_boxes_1 = torch.vstack( [ expected_boxes_1[:, 0] * postprocessed_width, expected_boxes_1[:, 1] * postprocessed_height, expected_boxes_1[:, 2] * postprocessed_width, expected_boxes_1[:, 3] * postprocessed_height, ] ).T # Convert from centre_x, centre_y, width, height to x_min, y_min, x_max, y_max expected_boxes_0 = torch.vstack( [ unnormalized_boxes_0[:, 0] - unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] - unnormalized_boxes_0[:, 3] / 2, unnormalized_boxes_0[:, 0] + unnormalized_boxes_0[:, 2] / 2, unnormalized_boxes_0[:, 1] + unnormalized_boxes_0[:, 3] / 2, ] ).T expected_boxes_1 = torch.vstack( [ unnormalized_boxes_1[:, 0] - unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] - unnormalized_boxes_1[:, 3] / 2, unnormalized_boxes_1[:, 0] + unnormalized_boxes_1[:, 2] / 2, unnormalized_boxes_1[:, 1] + unnormalized_boxes_1[:, 3] / 2, ] ).T self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"], expected_boxes_0, rtol=1)) self.assertTrue(torch.allclose(encoding["labels"][1]["boxes"], expected_boxes_1, rtol=1))
transformers/tests/models/yolos/test_image_processing_yolos.py/0
{ "file_path": "transformers/tests/models/yolos/test_image_processing_yolos.py", "repo_id": "transformers", "token_count": 11338 }
376
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class FillMaskPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_MASKED_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): backend_empty_cache(torch_device) @require_tf def test_small_model_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"}, ], ) @require_torch def test_small_model_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 
3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, ], ) outputs = unmasker("My name is <mask> <mask>", top_k=2) self.assertEqual( nested_simplify(outputs, decimals=6), [ [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ], ) @require_torch_accelerator def test_fp16_casting(self): pipe = pipeline( "fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=torch_device, framework="pt", ) # convert model to fp16 pipe.model.half() response = pipe("Paris is the [MASK] of France.") # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(response, list) @slow @require_torch def test_large_model_pt(self): unmasker = pipeline(task="fill-mask", model="distilbert/distilroberta-base", top_k=2, framework="pt") self.run_large_test(unmasker) @slow @require_tf def test_large_model_tf(self): unmasker = pipeline(task="fill-mask", model="distilbert/distilroberta-base", top_k=2, framework="tf") self.run_large_test(unmasker) def run_large_test(self, unmasker): outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs), [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12790, "token_str": " Lyon", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ], ) dummy_str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit," * 100 outputs = unmasker( "My name is <mask>" + dummy_str, tokenizer_kwargs={"truncation": True}, ) simplified = nested_simplify(outputs, decimals=4) self.assertEqual( [{"sequence": x["sequence"][:100]} for x in simplified], [ {"sequence": f"My name is,{dummy_str}"[:100]}, {"sequence": f"My name is:,{dummy_str}"[:100]}, ], ) self.assertEqual( [{k: x[k] for k in x if k != "sequence"} for x in simplified], [ {"score": 0.2819, "token": 6, "token_str": ","}, {"score": 0.0954, "token": 46686, "token_str": ":,"}, ], ) @require_torch def test_model_no_pad_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) @require_tf def test_model_no_pad_tf(self): unmasker = pipeline(task="fill-mask", 
model="sshleifer/tiny-distilroberta-base", framework="tf") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)") fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) examples = [ f"This is another {tokenizer.mask_token} test", ] return fill_masker, examples def run_pipeline_test(self, fill_masker, examples): tokenizer = fill_masker.tokenizer model = fill_masker.model outputs = fill_masker( f"This is a {tokenizer.mask_token}", ) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."]) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], ) with self.assertRaises(ValueError): fill_masker([None]) # No mask_token is not supported with self.assertRaises(PipelineException): fill_masker("This is") self.run_test_top_k(model, tokenizer) self.run_test_targets(model, tokenizer) self.run_test_top_k_targets(model, tokenizer) self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer) self.fill_mask_with_multiple_masks(model, tokenizer) def run_test_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() targets = sorted(vocab.keys())[:2] # Pipeline argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": 
ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Call argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Score equivalence outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) tokens = [top_mask["token_str"] for top_mask in outputs] scores = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(tokens) == set(targets): unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens) target_scores = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(scores), nested_simplify(target_scores)) # Raises with invalid with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[]) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""]) with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="") def run_test_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2) self.assertEqual( outputs2, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def run_test_top_k_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) # top_k=2, ntargets=3 targets = sorted(vocab.keys())[:3] outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets) # If we use the most probably targets, and filter differently, we should still # have the same results targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(targets2).issubset(targets): outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2) # They should yield exactly the same result self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) vocab = tokenizer.get_vocab() # String duplicates + id duplicates targets = sorted(vocab.keys())[:3] targets = [targets[0], targets[1], targets[0], targets[2], targets[1]] outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(outputs), 3) def fill_mask_with_multiple_masks(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker( f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2 ) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], )
transformers/tests/pipelines/test_pipelines_fill_mask.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_fill_mask.py", "repo_id": "transformers", "token_count": 9736 }
377
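For reference, the fill-mask behaviour exercised in the test file above maps onto a very small amount of user-facing code. A minimal sketch, assuming the same `distilbert/distilroberta-base` checkpoint used in the slow tests and network access to the Hub:

from transformers import pipeline

# Build the pipeline; `top_k` caps how many candidate tokens are returned per mask.
unmasker = pipeline(task="fill-mask", model="distilbert/distilroberta-base", top_k=2)

# Single mask: a flat list of dicts with "sequence", "score", "token" and "token_str" keys.
print(unmasker("My name is <mask>"))

# Restrict scoring to an explicit candidate list via `targets`.
print(unmasker("My name is <mask>", targets=[" Patrick", " Clara"], top_k=2))

# Several masks in one input: one list of candidates is returned per mask token.
print(unmasker("My name is <mask> <mask>"))

The output shapes mirror what `run_pipeline_test` asserts: a flat list for a single mask, nested lists once the input carries several mask tokens.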
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MBart50TokenizerFast, MBartConfig, MBartForConditionalGeneration, TranslationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow from .test_pipelines_common import ANY @is_pipeline_test class TranslationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, MBartConfig): src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2] translator = TranslationPipeline(model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang) else: translator = TranslationPipeline(model=model, tokenizer=tokenizer) return translator, ["Some string", "Some other text"] def run_pipeline_test(self, translator, _): outputs = translator("Some string") self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string", "other string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}]) @require_torch def test_small_model_pt(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_tf def test_small_model_tf(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_torch def test_en_to_de_pt(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) @require_tf def test_en_to_de_tf(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) class 
TranslationNewFormatPipelineTests(unittest.TestCase): @require_torch @slow def test_default_translations(self): # We don't provide a default for this pair with self.assertRaises(ValueError): pipeline(task="translation_cn_to_ar") # but we do for this one translator = pipeline(task="translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch @slow def test_multilingual_translation(self): model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") translator = pipeline(task="translation", model=model, tokenizer=tokenizer) # Missing src_lang, tgt_lang with self.assertRaises(ValueError): translator("This is a test") outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN") self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}]) # src_lang, tgt_lang can be defined at pipeline call time translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR") outputs = translator("This is a test") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) @require_torch def test_translation_on_odd_language(self): model = "patrickvonplaten/t5-tiny-random" translator = pipeline(task="translation_cn_to_ar", model=model) self.assertEqual(translator._preprocess_params["src_lang"], "cn") self.assertEqual(translator._preprocess_params["tgt_lang"], "ar") @require_torch def test_translation_default_language_selection(self): model = "patrickvonplaten/t5-tiny-random" with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"): translator = pipeline(task="translation", model=model) self.assertEqual(translator.task, "translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch def test_translation_with_no_language_no_model_fails(self): with self.assertRaises(ValueError): pipeline(task="translation")
transformers/tests/pipelines/test_pipelines_translation.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_translation.py", "repo_id": "transformers", "token_count": 3092 }
378
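As a companion to the translation tests above, here is a hedged sketch of the two usage patterns they cover: a task-specific alias such as `translation_en_to_ro`, and an explicit multilingual model where `src_lang`/`tgt_lang` must be supplied. Both checkpoints are the ones referenced in the tests; the mBART model is large and only used in the slow tests.

from transformers import MBart50TokenizerFast, MBartForConditionalGeneration, pipeline

# Task alias: source and target languages are read from the task name itself.
translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random")
print(translator("This is a test string", max_length=20))

# Multilingual mBART-50: languages must be given explicitly, either per call...
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
translator = pipeline("translation", model=model, tokenizer=tokenizer)
print(translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR"))

# ...or once, when the pipeline is constructed.
translator = pipeline("translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="hi_IN")
print(translator("This is a test"))

Omitting `src_lang`/`tgt_lang` on a multilingual model raises a ValueError, which is exactly what `test_multilingual_translation` checks.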
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig from transformers.testing_utils import ( is_torch_available, require_accelerate, require_auto_gptq, require_optimum, require_torch_gpu, require_torch_multi_gpu, slow, ) if is_torch_available(): import torch class GPTQConfigTest(unittest.TestCase): def test_bits(self): with self.assertRaises(ValueError): GPTQConfig(bits="") GPTQConfig(bits=1) GPTQConfig(bits=2) GPTQConfig(bits=4) def test_dataset(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, dataset="auto_gpt") GPTQConfig(bits=2, dataset="c4") GPTQConfig(bits=2, dataset="ptb-new") def test_damp_percent(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, damp_percent=10) GPTQConfig(bits=2, damp_percent=-1) GPTQConfig(bits=2, damp_percent="0") GPTQConfig(bits=2, damp_percent=0.01) def test_to_dict(self): quantization_config = GPTQConfig(bits=2) quantization_config.to_dict() def test_from_dict(self): dict = {"bits": 2} quantization_config = GPTQConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) @require_optimum def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @slow @require_optimum @require_auto_gptq @require_torch_gpu class GPTQTest(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") # this seems a little small considering that we are doing 4bit quant but we have a small model and we don't quantize the embeddings EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
] device_map = None # called only once for all tests in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map, quantization_config=quantization_config, ) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model """ mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Also checks if other models are cast correctly. """ # This should work if self.device_map is None: _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype` self.quantized_model.to(torch.float16) def test_original_dtype(self): r""" A simple test to check if the model successfully stores the original dtype """ self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): """ Simple test to check if the model conversion has been done correctly by checking on the class type of the linear layers of the converted models """ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear QuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) def test_serialization(self): """ Test the serialization of the model and the loading of the quantized weights works """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4) ).to(0) self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): """ Test the serialization of the model and the loading of the quantized weights with big model inference """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.check_inference_correctness(quantized_model_from_saved) def test_change_loading_attributes(self): """ Test the serialization of the model and the loading of the quantized weights works with another config file """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: self.check_quantized_layers_type(self.quantized_model, "cuda-old") # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMap(GPTQTest): device_map = "auto" @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" use_exllama = True @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestActOrderExllama(unittest.TestCase): """ Test GPTQ model with exllama kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? 
I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) def test_max_input_length(self): """ Test if the max_input_length works. It modifies the maximum input length that of the model that runs with exllama backend. """ prompt = "I am in Paris and" * 1000 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] > 4028) with self.assertRaises(RuntimeError) as cm: self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) self.assertTrue("temp_state buffer is too small" in str(cm.exception)) prompt = "I am in Paris and" inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] < 4028) self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestExllamaV2(unittest.TestCase): """ Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" 
@classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate a few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) # fails when run all together @pytest.mark.skip @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapCPUOffload(GPTQTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": "cpu", "transformer.h.19": "cpu", "transformer.h.20": "cpu", "transformer.h.21": "cpu", "transformer.h.22": "cpu", "transformer.h.23": 1, "transformer.ln_f": 0, }
transformers/tests/quantization/gptq/test_gptq.py/0
{ "file_path": "transformers/tests/quantization/gptq/test_gptq.py", "repo_id": "transformers", "token_count": 7230 }
379
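For orientation, the happy path these GPTQ tests exercise is simply passing a `GPTQConfig` to `from_pretrained`. A minimal sketch, assuming a CUDA GPU plus the `optimum`, `auto-gptq` and `accelerate` packages that the test decorators require, and mirroring the arguments from `GPTQTest.setUpClass` (the output directory name below is arbitrary):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_name = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

# Calibration data can be a named dataset such as "c4" or a list of strings;
# bits / group_size / desc_act control the quantization scheme.
quantization_config = GPTQConfig(
    bits=4,
    group_size=128,
    desc_act=False,
    dataset="c4",
    tokenizer=tokenizer,
)

# Quantization happens while loading; the result behaves like a regular model.
quantized_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    quantization_config=quantization_config,
)

# Saving stores the already-quantized weights; reloading skips calibration entirely.
quantized_model.save_pretrained("bloom-560m-gptq")
reloaded = AutoModelForCausalLM.from_pretrained("bloom-560m-gptq", device_map="auto")

This is also the shape of the serialization round-trip that `test_serialization` and `test_serialization_big_model_inference` verify.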
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "FacebookAI/roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "FacebookAI/roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): # configuration for running training on smdistributed Model Parallel mpi_options = { "enabled": True, "processes_per_host": 8, } smp_options = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(1,)]) def test_script(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump test results into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime,
"eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
transformers/tests/sagemaker/test_multi_node_model_parallel.py/0
{ "file_path": "transformers/tests/sagemaker/test_multi_node_model_parallel.py", "repo_id": "transformers", "token_count": 2103 }
380
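The estimator wiring in `create_estimator` is easier to read as a standalone snippet. A rough sketch, assuming a real SageMaker execution role and a Hugging Face training DLC image URI (both placeholders below), with the same smdistributed model-parallel options the test passes:

from sagemaker.huggingface import HuggingFace

mpi_options = {"enabled": True, "processes_per_host": 8}
smp_options = {
    "enabled": True,
    "parameters": {
        "microbatches": 4,
        "placement_strategy": "spread",
        "pipeline": "interleaved",
        "optimize": "speed",
        "partitions": 4,
        "ddp": True,
    },
}

estimator = HuggingFace(
    entry_point="run_glue.py",
    source_dir="./examples/pytorch/text-classification",
    role="<your-sagemaker-execution-role-arn>",        # placeholder
    image_uri="<your-huggingface-training-dlc-uri>",   # placeholder
    instance_count=1,
    instance_type="ml.p3dn.24xlarge",
    debugger_hook_config=False,
    hyperparameters={"model_name_or_path": "FacebookAI/roberta-large", "max_steps": 500},
    distribution={"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options},
    py_version="py36",
)
estimator.fit()

The test then pulls `eval_accuracy`/`eval_loss` back out of `TrainingJobAnalytics` and the wall-clock training time out of `describe_training_job` to assert against the thresholds in `results`.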
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import glob import json import os import os.path import sys import tempfile import unittest import unittest.mock as mock import uuid from pathlib import Path import requests from huggingface_hub import HfApi, HfFolder, delete_repo from huggingface_hub.file_download import http_get from pytest import mark from requests.exceptions import HTTPError from transformers import ( AutoConfig, AutoModel, AutoModelForSequenceClassification, OwlViTForObjectDetection, PretrainedConfig, is_torch_available, logging, ) from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, LoggingLevel, TestCasePlus, is_staging_test, require_accelerate, require_flax, require_safetensors, require_tf, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_multi_accelerator, require_usr_bin_time, slow, torch_device, ) from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from transformers.utils.import_utils import ( is_flash_attn_2_available, is_flax_available, is_tf_available, is_torch_sdpa_available, is_torchdynamo_available, ) sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_configuration import CustomConfig, NoSuperInitConfig # noqa E402 if is_torch_available(): import torch from safetensors.torch import save_file as safe_save_file from test_module.custom_modeling import CustomModel, NoSuperInitModel from torch import nn from transformers import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, AutoModelForCausalLM, AutoTokenizer, BertConfig, BertModel, CLIPTextModel, PreTrainedModel, T5Config, T5ForConditionalGeneration, ) from transformers.modeling_attn_mask_utils import ( AttentionMaskConverter, _create_4d_causal_attention_mask, _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask, ) from transformers.modeling_utils import shard_checkpoint # Fake pretrained models for tests class BaseModel(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) class BaseModelWithTiedWeights(PreTrainedModel): config_class = PretrainedConfig def __init__(self, config): super().__init__(config) self.linear = nn.Linear(5, 5) self.linear_2 = nn.Linear(5, 5) def forward(self, x): return self.linear_2(self.linear(x)) def tie_weights(self): self.linear_2.weight = self.linear.weight class ModelWithHead(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) # linear is a common name between Base and Head on purpose. 
self.linear = nn.Linear(5, 5) self.linear2 = nn.Linear(5, 5) def forward(self, x): return self.linear2(self.linear(self.base(x))) class ModelWithHeadAndTiedWeights(PreTrainedModel): base_model_prefix = "base" config_class = PretrainedConfig def _init_weights(self, module): pass def __init__(self, config): super().__init__(config) self.base = BaseModel(config) self.decoder = nn.Linear(5, 5) def forward(self, x): return self.decoder(self.base(x)) def tie_weights(self): self.decoder.weight = self.base.linear.weight class Prepare4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _prepare_4d_causal_attention_mask( None, (batch_size, seq_length), inputs_embeds, past_key_values_length ) return attention_mask class Create4dCausalAttentionMaskModel(nn.Module): def forward(self, inputs_embeds): batch_size, seq_length, _ = inputs_embeds.shape past_key_values_length = 4 attention_mask = _create_4d_causal_attention_mask( (batch_size, seq_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) return attention_mask class Prepare4dAttentionMaskModel(nn.Module): def forward(self, mask, inputs_embeds): attention_mask = _prepare_4d_attention_mask(mask, dtype=inputs_embeds.dtype) return attention_mask if is_flax_available(): from transformers import FlaxBertModel if is_tf_available(): from transformers import TFBertModel TINY_T5 = "patrickvonplaten/t5-tiny-random" TINY_BERT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-bert-for-token-classification" TINY_MISTRAL = "hf-internal-testing/tiny-random-MistralForCausalLM" def check_models_equal(model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal @require_torch class ModelUtilsTest(TestCasePlus): @slow def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = BertConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, PretrainedConfig) model = BertModel.from_pretrained(model_name) model, loading_info = BertModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, PreTrainedModel) self.assertEqual(len(loading_info["missing_keys"]), 0) self.assertEqual(len(loading_info["unexpected_keys"]), 8) self.assertEqual(len(loading_info["mismatched_keys"]), 0) self.assertEqual(len(loading_info["error_msgs"]), 0) config = BertConfig.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) # Not sure this is the intended behavior. 
TODO fix Lysandre & Thom config.name_or_path = model_name model = BertModel.from_pretrained(model_name, output_attentions=True, output_hidden_states=True) self.assertEqual(model.config.output_hidden_states, True) self.assertEqual(model.config, config) def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") model = BertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = BertModel.from_pretrained(tmp_dir) model_loaded = BertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = BertModel.from_pretrained(model_id) model = BertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_with_different_pretrained_model_name(self): model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertIsNotNone(model) logger = logging.get_logger("transformers.configuration_utils") with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: BertModel.from_pretrained(TINY_T5) self.assertTrue("You are using a model of type t5 to instantiate a model of type bert" in cl.out) @require_accelerate def test_model_from_pretrained_with_none_quantization_config(self): # Needs a device_map for to enter the low_cpu_mem branch. We also load AutoModelForSequenceClassification # deliberately to enter the missing keys branch. model = AutoModelForSequenceClassification.from_pretrained( TINY_MISTRAL, device_map="auto", quantization_config=None ) self.assertIsNotNone(model) def test_model_from_config_torch_dtype(self): # test that the model can be instantiated with dtype of user's choice - as long as it's a # float dtype. To make it happen config.torch_dtype needs to be set before instantiating the # model from the config object. config = T5Config.from_pretrained(TINY_T5) model = AutoModel.from_config(config) # XXX: isn't supported # model = T5ForConditionalGeneration.from_config(config) self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_config(config, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # torch.set_default_dtype() supports only float dtypes, so will fail with non-float type with self.assertRaises(ValueError): model = AutoModel.from_config(config, torch_dtype=torch.int64) def test_model_from_pretrained_torch_dtype(self): # test that the model can be instantiated with dtype of either # 1. 
explicit from_pretrained's torch_dtype argument # 2. via autodiscovery by looking at model weights (torch_dtype="auto") # so if a model.half() was saved, we want it to be instantiated as such. # # test an explicit model class, but also AutoModel separately as the latter goes through a different code path model_path = self.get_auto_remove_tmp_dir() # baseline - we know TINY_T5 is fp32 model model = T5ForConditionalGeneration.from_pretrained(TINY_T5) self.assertEqual(model.dtype, torch.float32) def remove_torch_dtype(model_path): file = f"{model_path}/config.json" with open(file, "r", encoding="utf-8") as f: s = json.load(f) s.pop("torch_dtype") with open(file, "w", encoding="utf-8") as f: json.dump(s, f) # test the default fp32 save_pretrained => from_pretrained cycle model.save_pretrained(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path) self.assertEqual(model.dtype, torch.float32) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # 2. test torch_dtype="auto" via auto-derivation # now remove the torch_dtype entry from config.json and try "auto" again which should # perform auto-derivation from weights remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) # test forced loading in fp16 (even though the weights are in fp32) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with auto-detection model = model.half() model.save_pretrained(model_path) # 1. test torch_dtype="auto" via `config.torch_dtype` model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.config.torch_dtype, torch.float16) self.assertEqual(model.dtype, torch.float16) # tests `config.torch_dtype` saving with open(f"{model_path}/config.json") as f: config_dict = json.load(f) self.assertEqual(config_dict["torch_dtype"], "float16") # 2. test torch_dtype="auto" via auto-derivation # now same with using config info remove_torch_dtype(model_path) model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # 3. 
now retest that AutoModel behaves the same wrt torch_dtype="auto" as T5ForConditionalGeneration model = AutoModel.from_pretrained(model_path, torch_dtype="auto") self.assertEqual(model.dtype, torch.float16) # test fp16 save_pretrained, loaded with the explicit fp16 model = T5ForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test AutoModel separately as it goes through a different path # test auto-detection - as currently TINY_T5 doesn't have torch_dtype entry model = AutoModel.from_pretrained(TINY_T5, torch_dtype="auto") # test that the config object didn't get polluted with torch_dtype="auto" # there was a bug that after this call we ended up with config.torch_dtype=="auto" self.assertNotEqual(model.config.torch_dtype, "auto") # now test the outcome self.assertEqual(model.dtype, torch.float32) model = AutoModel.from_pretrained(TINY_T5, torch_dtype=torch.float16) self.assertEqual(model.dtype, torch.float16) # test model whose first param is not of a floating type, but int model = AutoModel.from_pretrained(TINY_BERT_FOR_TOKEN_CLASSIFICATION, torch_dtype="auto") self.assertEqual(model.dtype, torch.float32) def test_no_super_init_config_and_model(self): config = NoSuperInitConfig(attribute=32) model = NoSuperInitModel(config) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = NoSuperInitModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_shard_checkpoint(self): # This is the model we will use, total size 340,000 bytes. model = torch.nn.Sequential( torch.nn.Linear(100, 200, bias=False), # size 80,000 torch.nn.Linear(200, 200, bias=False), # size 160,000 torch.nn.Linear(200, 100, bias=False), # size 80,000 torch.nn.Linear(100, 50, bias=False), # size 20,000 ) state_dict = model.state_dict() with self.subTest("No shard when max size is bigger than model size"): shards, index = shard_checkpoint(state_dict) self.assertIsNone(index) self.assertDictEqual(shards, {WEIGHTS_NAME: state_dict}) with self.subTest("Test sharding, no weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="300kB") # Split is first two layers then last two. self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00002.bin", "1.weight": "pytorch_model-00001-of-00002.bin", "2.weight": "pytorch_model-00002-of-00002.bin", "3.weight": "pytorch_model-00002-of-00002.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"], "1.weight": state_dict["1.weight"]} shard2 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, {"pytorch_model-00001-of-00002.bin": shard1, "pytorch_model-00002-of-00002.bin": shard2} ) with self.subTest("Test sharding with weights bigger than max size"): shards, index = shard_checkpoint(state_dict, max_shard_size="100kB") # Split is first layer, second layer then last 2. 
self.assertDictEqual( index, { "metadata": {"total_size": 340000}, "weight_map": { "0.weight": "pytorch_model-00001-of-00003.bin", "1.weight": "pytorch_model-00002-of-00003.bin", "2.weight": "pytorch_model-00003-of-00003.bin", "3.weight": "pytorch_model-00003-of-00003.bin", }, }, ) shard1 = {"0.weight": state_dict["0.weight"]} shard2 = {"1.weight": state_dict["1.weight"]} shard3 = {"2.weight": state_dict["2.weight"], "3.weight": state_dict["3.weight"]} self.assertDictEqual( shards, { "pytorch_model-00001-of-00003.bin": shard1, "pytorch_model-00002-of-00003.bin": shard2, "pytorch_model-00003-of-00003.bin": shard3, }, ) def test_checkpoint_sharding_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. for max_size in ["50kB", "50kiB", "100kB", "100kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size, safe_serialization=False) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".bin"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: state_dict = torch.load(shard_file) self.assertEqual(len(state_dict), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".bin")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_sharding_from_hub(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. 
ref_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") for p1, p2 in zip(model.parameters(), ref_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_local_sharded_bin(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=False) weights_index_name = ".".join(WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["bin"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", safe_serialization=True) weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["safetensors"]) weights_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_checkpoint_variant_local_sharded_safe(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, variant="v2", max_shard_size="50kB", safe_serialization=True) weights_index_name = ".".join(SAFE_WEIGHTS_INDEX_NAME.split(".")[:-1] + ["v2"] + ["json"]) weights_index_file = os.path.join(tmp_dir, weights_index_name) self.assertTrue(os.path.isfile(weights_index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) for i in range(1, 5): weights_name = ".".join(SAFE_WEIGHTS_NAME.split(".")[:-1] + [f"v2-0000{i}-of-00005"] + ["safetensors"]) weights_name_file = os.path.join(tmp_dir, weights_name) self.assertTrue(os.path.isfile(weights_name_file)) with self.assertRaises(EnvironmentError): _ = 
BertModel.from_pretrained(tmp_dir) new_model = BertModel.from_pretrained(tmp_dir, variant="v2") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_checkpoint_variant_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_hub_sharded(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) @require_safetensors def test_checkpoint_variant_hub_sharded_safe(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(EnvironmentError): _ = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir ) model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant-sharded-safe", cache_dir=tmp_dir, variant="v2" ) self.assertIsNotNone(model) def test_checkpoint_variant_save_load_bin(self): with tempfile.TemporaryDirectory() as tmp_dir: model = BertModel.from_pretrained( "hf-internal-testing/tiny-random-bert-variant", cache_dir=tmp_dir, variant="v2" ) weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) model.save_pretrained(tmp_dir, variant="v2", safe_serialization=False) # saving will create a variant checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) model.save_pretrained(tmp_dir, safe_serialization=False) # saving shouldn't delete variant checkpoints weights_name = ".".join(WEIGHTS_NAME.split(".")[:-1] + ["v2"] + ["bin"]) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, weights_name))) # there should be a normal checkpoint self.assertTrue(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertIsNotNone(model) @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_functional(self): # test that we can use `from_pretrained(..., low_cpu_mem_usage=True)` with normal and # sharded models mnames = [ "hf-internal-testing/tiny-random-bert-sharded", "hf-internal-testing/tiny-random-bert", ] for mname in mnames: _ = BertModel.from_pretrained(mname, low_cpu_mem_usage=True) @require_usr_bin_time @require_accelerate @mark.accelerate_tests def test_from_pretrained_low_cpu_mem_usage_measured(self): # test that `from_pretrained(..., low_cpu_mem_usage=True)` uses less cpu memory than default mname = "google-bert/bert-base-cased" preamble = "from transformers import AutoModel" one_liner_str = f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=False)' max_rss_normal = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_normal=}") one_liner_str = 
f'{preamble}; AutoModel.from_pretrained("{mname}", low_cpu_mem_usage=True)' max_rss_low_mem = self.python_one_liner_max_rss(one_liner_str) # print(f"{max_rss_low_mem=}") diff_bytes = max_rss_normal - max_rss_low_mem diff_percent = diff_bytes / max_rss_low_mem # print(f"{diff_bytes=}, {diff_percent=}") # ideally we would compare that the diff is close to ~1x checkpoint size in bytes, but # measuring cpu memory on linux is very tricky and inconsistent, so instead let's check that # it's at least 15% less cpu memory consumed self.assertGreater( diff_percent, 0.15, "should use less CPU memory for low_cpu_mem_usage=True, " f"but got max_rss_normal={max_rss_normal} and max_rss_low_mem={max_rss_low_mem}", ) # if you want to compare things manually, let's first look at the size of the model in bytes # model = BertModel.from_pretrained(mname, low_cpu_mem_usage=False) # total_numel = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) # total_bytes = total_numel * 4 # 420MB # Now the diff_bytes should be very close to total_bytes, but the reports are inconsistent. # The easiest way to test this is to switch the model and torch.load to do all the work on # gpu - that way one can measure exactly the total and peak memory used. Perhaps once we add # functionality to load models directly on gpu, this test can be rewritten to use torch's # cuda memory tracking and then we should be able to do a much more precise test. @require_accelerate @mark.accelerate_tests @require_torch_multi_accelerator @slow def test_model_parallelism_gpt2(self): device_map = {"transformer.wte": 0, "transformer.wpe": 0, "lm_head": 0, "transformer.ln_f": 1} for i in range(12): device_map[f"transformer.h.{i}"] = 0 if i <= 5 else 1 model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2", device_map=device_map) tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") inputs = tokenizer("Hello, my name is", return_tensors="pt") output = model.generate(inputs["input_ids"].to(0)) text_output = tokenizer.decode(output[0].tolist()) self.assertEqual(text_output, "Hello, my name is John. I'm a writer, and I'm a writer. 
I'm") @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_from_pretrained_disk_offload_task_model(self): model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "transformer.wte": 0, "transformer.wpe": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": "cpu", "transformer.h.3": "disk", "transformer.h.4": "disk", "transformer.ln_f": 0, "lm_head": 0, } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(0) model.save_pretrained(tmp_dir) new_model = AutoModelForCausalLM.from_pretrained(tmp_dir).to(0) outputs1 = new_model.to(0)(inputs) offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) # With state dict temp offload offload_folder = os.path.join(tmp_dir, "offload") new_model_with_offload = AutoModelForCausalLM.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1.logits.cpu(), outputs2.logits.cpu())) @require_accelerate @mark.accelerate_tests @require_torch_gpu def test_from_pretrained_disk_offload_derived_to_base_model(self): derived_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") device_map = { "wte": 0, "wpe": 0, "h.0": "cpu", "h.1": "cpu", "h.2": "cpu", "h.3": "disk", "h.4": "disk", "ln_f": 0, } with tempfile.TemporaryDirectory() as tmp_dir: inputs = torch.tensor([[1, 2, 3]]).to(0) derived_model.save_pretrained(tmp_dir, use_safetensors=True) base_model = AutoModel.from_pretrained(tmp_dir) outputs1 = base_model.to(0)(inputs) # with disk offload offload_folder = os.path.join(tmp_dir, "offload") base_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder ) outputs2 = base_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) # With state dict temp offload new_model_with_offload = AutoModel.from_pretrained( tmp_dir, device_map=device_map, offload_folder=offload_folder, offload_state_dict=True, ) outputs2 = new_model_with_offload(inputs) self.assertTrue(torch.allclose(outputs1[0].cpu(), outputs2[0].cpu())) @slow @require_torch def test_from_pretrained_non_contiguous_checkpoint(self): # See: https://github.com/huggingface/transformers/pull/28414 # Tiny models on the Hub have contiguous weights, contrarily to google/owlvit model = OwlViTForObjectDetection.from_pretrained("fxmarty/owlvit-tiny-non-contiguous-weight") self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) model = OwlViTForObjectDetection.from_pretrained( "fxmarty/owlvit-tiny-non-contiguous-weight", device_map="auto" ) self.assertTrue(model.owlvit.visual_projection.weight.is_contiguous()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=False) model.save_pretrained(tmp_dir, safe_serialization=True) def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. 
_ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() def test_load_from_one_file(self): try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", f ) config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained(tmp_file, config=config) finally: os.remove(tmp_file) def test_legacy_load_from_url(self): # This test is for deprecated behavior and can be removed in v5 config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") _ = BertModel.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config ) @require_safetensors def test_use_safetensors(self): # Should not raise anymore AutoModel.from_pretrained("hf-internal-testing/tiny-random-RobertaModel", use_safetensors=True) # test that error if only safetensors is available with self.assertRaises(OSError) as env_error: BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors", use_safetensors=False) self.assertTrue("does not appear to have a file named pytorch_model.bin" in str(env_error.exception)) # test that only safetensors if both available and use_safetensors=False with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=False, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("bin") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("safetensors") for f in all_downloaded_files)) # test that no safetensors if both available and use_safetensors=True with tempfile.TemporaryDirectory() as tmp_dir: CLIPTextModel.from_pretrained( "hf-internal-testing/diffusers-stable-diffusion-tiny-all", subfolder="text_encoder", use_safetensors=True, cache_dir=tmp_dir, ) all_downloaded_files = glob.glob(os.path.join(tmp_dir, "*", "snapshots", "*", "*", "*")) self.assertTrue(any(f.endswith("safetensors") for f in all_downloaded_files)) self.assertFalse(any(f.endswith("bin") for f in all_downloaded_files)) @require_safetensors def test_safetensors_save_and_load(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) # No pytorch_model.bin file, only a model.safetensors self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): 
self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_save_and_load_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") # No pytorch_model.bin index file, only a model.safetensors index self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) # No regular weights file self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) new_model = BertModel.from_pretrained(tmp_dir) # Check models are equal for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) @require_safetensors def test_safetensors_load_from_hub_sharded(self): safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors") pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") # Check models are equal for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) def test_base_model_to_head_model_load(self): base_model = BaseModel(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: base_model.save_pretrained(tmp_dir, safe_serialization=False) # Can load a base model in a model with head model = ModelWithHead.from_pretrained(tmp_dir) for p1, p2 in zip(model.base.parameters(), base_model.parameters()): self.assertTrue(torch.allclose(p1, p2)) # It doesn't work if the state dict has a mix of keys of the head and base without prefix though. base_state_dict = base_model.state_dict() head_state_dict = model.state_dict() base_state_dict["linear2.weight"] = head_state_dict["linear2.weight"] base_state_dict["linear2.bias"] = head_state_dict["linear2.bias"] safe_save_file(base_state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with self.assertRaisesRegex( ValueError, "The state dictionary of the model you are trying to load is corrupted." 
): _ = ModelWithHead.from_pretrained(tmp_dir) def test_tied_weights_reload(self): # Base model = BaseModelWithTiedWeights(PretrainedConfig()) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) new_model = BaseModelWithTiedWeights.from_pretrained(tmp_dir) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) state_dict = model.state_dict() # Remove tied weight from state_dict -> model should load with no complain of missing keys del state_dict["linear_2.weight"] torch.save(state_dict, os.path.join(tmp_dir, WEIGHTS_NAME)) new_model, load_info = BaseModelWithTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertListEqual(load_info["missing_keys"], []) self.assertIs(new_model.linear.weight, new_model.linear_2.weight) # With head model.save_pretrained(tmp_dir) new_model, load_info = ModelWithHeadAndTiedWeights.from_pretrained(tmp_dir, output_loading_info=True) self.assertIs(new_model.base.linear.weight, new_model.decoder.weight) # Should only complain about the missing bias self.assertListEqual(load_info["missing_keys"], ["decoder.bias"]) def test_unexpected_keys_warnings(self): model = ModelWithHead(PretrainedConfig()) logger = logging.get_logger("transformers.modeling_utils") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) # Loading the model with a new class, we don't get a warning for unexpected weights, just an info with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = BaseModel.from_pretrained(tmp_dir, output_loading_info=True) self.assertNotIn("were not used when initializing ModelWithHead", cl.out) self.assertEqual( set(loading_info["unexpected_keys"]), {"linear.weight", "linear.bias", "linear2.weight", "linear2.bias"}, ) # Loading the model with the same class, we do get a warning for unexpected weights state_dict = model.state_dict() state_dict["added_key"] = copy.deepcopy(state_dict["linear.weight"]) safe_save_file(state_dict, os.path.join(tmp_dir, SAFE_WEIGHTS_NAME), metadata={"format": "pt"}) with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: _, loading_info = ModelWithHead.from_pretrained(tmp_dir, output_loading_info=True) self.assertIn("were not used when initializing ModelWithHead: ['added_key']", cl.out) self.assertEqual(loading_info["unexpected_keys"], ["added_key"]) def test_warn_if_padding_and_no_attention_mask(self): logger = logging.get_logger("transformers.modeling_utils") with self.subTest("Ensure no warnings when pad_token_id is None."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config_no_pad_token = PretrainedConfig() config_no_pad_token.pad_token_id = None model = ModelWithHead(config_no_pad_token) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure no warnings when there is an attention_mask."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) self.assertNotIn("We strongly recommend passing in an 
`attention_mask`", cl.out) with self.subTest("Ensure no warnings when there are no pad_token_ids in the input_ids."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[1, 345, 232, 328, 740, 140, 1695, 69, 6078, 2341, 25]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertNotIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids start with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure a warning is shown when the input_ids end with a pad_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[432, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) with self.subTest("Ensure that the warning is shown at most once."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertEqual(cl.out.count("We strongly recommend passing in an `attention_mask`"), 1) with self.subTest("Ensure a different warning is shown when the pad_token_id is equal to the bos_token_id."): logger.warning_once.cache_clear() with LoggingLevel(logging.WARNING): with CaptureLogger(logger) as cl: config = PretrainedConfig() config.pad_token_id = 0 config.bos_token_id = config.pad_token_id model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 0, 0]]) model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) self.assertIn("You may ignore this warning if your `pad_token_id`", cl.out) if not is_torchdynamo_available(): return with self.subTest("Ensure that the warning code is skipped when compiling with torchdynamo."): logger.warning_once.cache_clear() from torch._dynamo import config, testing config = PretrainedConfig() config.pad_token_id = 0 model = ModelWithHead(config) input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 432, 5232]]) def f(input_ids): model.warn_if_padding_and_no_attention_mask(input_ids, attention_mask=None) compile_counter = testing.CompileCounter() opt_fn = torch.compile(f, dynamic=True, backend=compile_counter) opt_fn(input_ids) self.assertEqual(compile_counter.frame_count, 0) @require_torch_accelerator @slow def test_pretrained_low_mem_new_config(self): # Checking for 1 model(the same one which was described in the issue) . 
model_ids = ["openai-community/gpt2"] for model_id in model_ids: model_config = AutoConfig.from_pretrained(pretrained_model_name_or_path=model_id) model_config.n_layer = 48 model_config.n_head = 25 model_config.n_embd = 1600 model = AutoModelForCausalLM.from_pretrained( pretrained_model_name_or_path=model_id, config=model_config, ignore_mismatched_sizes=True, torch_dtype=torch.float16, low_cpu_mem_usage=True, ) model_ref = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path=model_id) self.assertEqual(model.__class__.__name__, model_ref.__class__.__name__) def test_generation_config_is_loaded_with_model(self): # Note: `joaogante/tiny-random-gpt2-with-generation-config` has a `generation_config.json` containing a dummy # `transformers_version` field set to `foo`. If loading the file fails, this test also fails. # 1. Load without further parameters model = AutoModelForCausalLM.from_pretrained( "joaogante/tiny-random-gpt2-with-generation-config", use_safetensors=False ) self.assertEqual(model.generation_config.transformers_version, "foo") # 2. Load with `device_map` model = AutoModelForCausalLM.from_pretrained( "joaogante/tiny-random-gpt2-with-generation-config", device_map="auto", use_safetensors=False ) self.assertEqual(model.generation_config.transformers_version, "foo") @require_safetensors def test_safetensors_torch_from_torch(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors @require_flax def test_safetensors_torch_from_flax(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_tf @require_safetensors def test_safetensors_torch_from_tf(self): hub_model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(hub_model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) @require_safetensors def test_safetensors_torch_from_torch_sharded(self): model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") new_model = BertModel.from_pretrained(tmp_dir) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_modifying_model_config_causes_warning_saving_generation_config(self): model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") model.config.top_k = 1 with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs("transformers.modeling_utils", level="WARNING") as logs: model.save_pretrained(tmp_dir) self.assertEqual(len(logs.output), 1) self.assertIn("Your generation config was originally created from the model config", 
logs.output[0]) @require_safetensors def test_model_from_pretrained_from_mlx(self): from safetensors import safe_open model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-mistral-mlx") self.assertIsNotNone(model) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, safe_serialization=True) with safe_open(os.path.join(tmp_dir, "model.safetensors"), framework="pt") as f: metadata = f.metadata() self.assertEqual(metadata.get("format"), "pt") new_model = AutoModelForCausalLM.from_pretrained(tmp_dir) input_ids = torch.randint(100, 1000, (1, 10)) with torch.no_grad(): outputs = model(input_ids) outputs_from_saved = new_model(input_ids) self.assertTrue(torch.allclose(outputs_from_saved["logits"], outputs["logits"])) @slow @require_torch class ModelOnTheFlyConversionTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.user = "huggingface-hub-ci" cls.token = os.getenv("HUGGINGFACE_PRODUCTION_USER_TOKEN", None) if cls.token is None: raise ValueError("Cannot run tests as secret isn't setup.") cls.api = HfApi(token=cls.token) def setUp(self) -> None: self.repo_name = f"{self.user}/test-model-on-the-fly-{uuid.uuid4()}" def tearDown(self) -> None: self.api.delete_repo(self.repo_name) def test_safetensors_on_the_fly_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name, token=self.token) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with 
self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb") converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_private(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub( self.repo_name, token=self.token, safe_serialization=False, max_shard_size="200kb", private=True ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_sharded_conversion_gated(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, max_shard_size="200kb", safe_serialization=False) headers = {"Authorization": f"Bearer {self.token}"} requests.put( f"https://huggingface.co/api/models/{self.repo_name}/settings", json={"gated": "auto"}, headers=headers ) converted_model = BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("Initial and converted models are equal"): for p1, p2 in zip(initial_model.parameters(), converted_model.parameters()): self.assertTrue(torch.equal(p1, p2)) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, "SFconvertbot") self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") @unittest.skip("Edge case, should work once the Space is updated`") def test_safetensors_on_the_fly_wrong_user_opened_pr(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = 
BertModel(config) initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, private=True) BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) # This should have opened a PR with the user's account with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) discussion = next(discussions) self.assertEqual(discussion.author, self.user) self.assertEqual(discussion.title, "Adding `safetensors` variant of this model") # We now switch the repo visibility to public self.api.update_repo_visibility(self.repo_name, private=False) # We once again call from_pretrained, which should call the bot to open a PR BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token) with self.subTest("PR was open with the safetensors account"): discussions = self.api.get_repo_discussions(self.repo_name) bot_opened_pr = None bot_opened_pr_title = None for discussion in discussions: if discussion.author == "SFconvertBot": bot_opened_pr = True bot_opened_pr_title = discussion.title self.assertTrue(bot_opened_pr) self.assertEqual(bot_opened_pr_title, "Adding `safetensors` variant of this model") def test_safetensors_on_the_fly_specific_revision(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) initial_model = BertModel(config) # Push a model on `main` initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False) # Push a model on a given revision initial_model.push_to_hub(self.repo_name, token=self.token, safe_serialization=False, revision="new-branch") # Try to convert the model on that revision should raise with self.assertRaises(EnvironmentError): BertModel.from_pretrained(self.repo_name, use_safetensors=True, token=self.token, revision="new-branch") @require_torch @is_staging_test class ModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-org") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-model") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-model-with-tags") except HTTPError: pass @unittest.skip("This test is flaky") def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("test-model", token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=self._token, repo_id="test-model") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model", push_to_hub=True, token=self._token) new_model = BertModel.from_pretrained(f"{USER}/test-model") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_with_description(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) COMMIT_DESCRIPTION = """ The commit description supports markdown synthax see: ```python >>> form 
transformers import AutoConfig >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased") ``` """ commit_details = model.push_to_hub( "test-model", use_auth_token=self._token, create_pr=True, commit_description=COMMIT_DESCRIPTION ) self.assertEqual(commit_details.commit_description, COMMIT_DESCRIPTION) @unittest.skip("This test is flaky") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertModel(config) model.push_to_hub("valid_org/test-model-org", token=self._token) new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=self._token, repo_id="valid_org/test-model-org") new_model = BertModel.from_pretrained("valid_org/test-model-org") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) def test_push_to_hub_dynamic_model(self): CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) model.push_to_hub("test-dynamic-model", token=self._token) # checks self.assertDictEqual( config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig", "AutoModel": "custom_modeling.CustomModel"}, ) new_model = AutoModel.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) # Can't make an isinstance check because the new_model is from the CustomModel class of a dynamic module self.assertEqual(new_model.__class__.__name__, "CustomModel") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-model", trust_remote_code=True) new_model = AutoModel.from_config(config, trust_remote_code=True) self.assertEqual(new_model.__class__.__name__, "CustomModel") def test_push_to_hub_with_tags(self): from huggingface_hub import ModelCard new_tags = ["tag-1", "tag-2"] CustomConfig.register_for_auto_class() CustomModel.register_for_auto_class() config = CustomConfig(hidden_size=32) model = CustomModel(config) self.assertTrue(model.model_tags is None) model.add_model_tags(new_tags) self.assertTrue(model.model_tags == new_tags) model.push_to_hub("test-dynamic-model-with-tags", token=self._token) loaded_model_card = ModelCard.load(f"{USER}/test-dynamic-model-with-tags") self.assertEqual(loaded_model_card.data.tags, new_tags) @require_torch class AttentionMaskTester(unittest.TestCase): def check_non_causal(self, bsz, q_len, kv_len, mask_2d, mask_4d): mask_indices = (mask_2d != 1)[:, None].broadcast_to((bsz, q_len, kv_len)) mask_4d_values = mask_4d[:, 0][mask_indices] is_inf = mask_4d_values == -float("inf") is_min = mask_4d_values == torch.finfo(mask_4d.dtype).min assert torch.logical_or(is_inf, is_min).all() def check_to_4d(self, mask_converter, q_len, kv_len, additional_mask=None, bsz=3): mask_2d = torch.ones((bsz, kv_len), device=torch_device, dtype=torch.long) if additional_mask is not None: for bsz_idx, seq_idx in additional_mask: mask_2d[bsz_idx, seq_idx] = 0 mask_4d = mask_converter.to_4d(mask_2d, query_length=q_len, key_value_length=kv_len, dtype=torch.float32) assert mask_4d.shape == (bsz, 1, q_len, kv_len) # make 
sure there are no overflows assert mask_4d.min() != float("-inf") context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif not mask_converter.is_causal and context is None: if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == 0 if 0 in mask_2d: self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked if 0 not in mask_2d: assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked if 0 in mask_2d: # at least causal mask + maybe more assert (mask_4d != 0).sum().cpu().item() >= num_tokens_masked self.check_non_causal(bsz, q_len, kv_len, mask_2d, mask_4d) def check_to_causal(self, mask_converter, q_len, kv_len, bsz=3): mask_4d = mask_converter.to_causal_4d( bsz, query_length=q_len, key_value_length=kv_len, device=torch_device, dtype=torch.float32 ) if q_len == 1 and mask_converter.sliding_window is None: # no causal mask if q_len is 1 assert mask_4d is None return context = mask_converter.sliding_window if mask_converter.is_causal and context is None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = bsz * (q_len * (q_len - 1) // 2) assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked elif not mask_converter.is_causal and context is None: assert (mask_4d != 0).sum().cpu().item() == 0 elif mask_converter.is_causal and context is not None: # k * (k+1) / 2 tokens are masked in triangualar masks num_tokens_masked = (q_len * (q_len - 1) // 2) + self.compute_num_context_mask(kv_len, context, q_len) num_tokens_masked = bsz * num_tokens_masked assert (mask_4d != 0).sum().cpu().item() == num_tokens_masked def compute_num_context_mask(self, kv_len, context, q_len): # This function computes the # of attention tokens that are added for # the sliding window c_mask_len = kv_len - context - 1 num_mask_triangle = c_mask_len * (c_mask_len + 1) // 2 cut_mask_len = max(c_mask_len - q_len, 0) num_cut_mask = cut_mask_len * (cut_mask_len + 1) // 2 return num_mask_triangle - num_cut_mask def test_2d_to_4d_causal(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) # check that the mask does not overflow on causal masked tokens self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 0), (1, 0), (1, 1)]) def test_2d_to_4d(self): mask_converter = AttentionMaskConverter(is_causal=False) # non auto-regressive case self.check_to_4d(mask_converter, 
q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_2d_to_4d_causal_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=5) # auto-regressive use case self.check_to_4d(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_4d(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_4d(mask_converter, q_len=7, kv_len=7) # same with extra attention masks self.check_to_4d(mask_converter, q_len=1, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=3, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) self.check_to_4d(mask_converter, q_len=7, kv_len=7, additional_mask=[(0, 2), (1, 3), (2, 0)]) def test_causal_mask(self): mask_converter = AttentionMaskConverter(is_causal=True) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_causal_mask_sliding(self): mask_converter = AttentionMaskConverter(is_causal=True, sliding_window=3) # auto-regressive use case self.check_to_causal(mask_converter, q_len=1, kv_len=7) # special auto-regressive case self.check_to_causal(mask_converter, q_len=3, kv_len=7) # non auto-regressive case self.check_to_causal(mask_converter, q_len=7, kv_len=7) def test_torch_compile_fullgraph(self): model = Prepare4dCausalAttentionMaskModel() inputs_embeds = torch.rand([1, 3, 32]) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Create4dCausalAttentionMaskModel() inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) model = Prepare4dAttentionMaskModel() mask = torch.ones(2, 4) mask[0, :2] = 0 inputs_embeds = torch.rand(2, 4, 16) res_non_compiled = model(mask, inputs_embeds) compiled_model = torch.compile(model, fullgraph=True) res_compiled = compiled_model(mask, inputs_embeds) self.assertTrue(torch.equal(res_non_compiled, res_compiled)) @require_torch @slow def test_unmask_unattended_left_padding(self): attention_mask = torch.Tensor([[0, 0, 1], [1, 1, 1], [0, 1, 1]]).to(torch.int64) expanded_mask = torch.Tensor( [ [[[0, 0, 0], [0, 0, 0], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[0, 0, 0], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) reference_output = torch.Tensor( [ [[[1, 1, 1], [1, 1, 1], [0, 0, 1]]], [[[1, 0, 0], [1, 1, 0], [1, 1, 1]]], [[[1, 1, 1], [0, 1, 0], [0, 1, 1]]], ] ).to(torch.int64) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=1) self.assertTrue(torch.equal(result, reference_output)) attention_mask = torch.Tensor([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1], [0, 1, 1, 1, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, 
attention_mask, unmasked_value=0) min_inf = torch.finfo(torch.float32).min reference_output = torch.Tensor( [ [ [ [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [min_inf, min_inf, 0, min_inf, min_inf], [min_inf, min_inf, 0, 0, min_inf], [min_inf, min_inf, 0, 0, 0], ] ], [ [ [0, min_inf, min_inf, min_inf, min_inf], [0, 0, min_inf, min_inf, min_inf], [0, 0, 0, min_inf, min_inf], [0, 0, 0, 0, min_inf], [0, 0, 0, 0, 0], ] ], [ [ [0, 0, 0, 0, 0], [min_inf, 0, min_inf, min_inf, min_inf], [min_inf, 0, 0, min_inf, min_inf], [min_inf, 0, 0, 0, min_inf], [min_inf, 0, 0, 0, 0], ] ], ] ) self.assertTrue(torch.equal(reference_output, result)) @require_torch @slow def test_unmask_unattended_right_padding(self): attention_mask = torch.Tensor([[1, 1, 1, 0], [1, 1, 1, 1], [1, 1, 0, 0]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch @slow def test_unmask_unattended_random_mask(self): attention_mask = torch.Tensor([[1, 0, 1, 0], [1, 0, 1, 1], [1, 1, 0, 1]]).to(torch.int64) attn_mask_converter = AttentionMaskConverter(is_causal=True) past_key_values_length = 0 key_value_length = attention_mask.shape[-1] + past_key_values_length expanded_mask = attn_mask_converter.to_4d( attention_mask, attention_mask.shape[-1], key_value_length=key_value_length, dtype=torch.float32 ) result = AttentionMaskConverter._unmask_unattended(expanded_mask, attention_mask, unmasked_value=0) self.assertTrue(torch.equal(expanded_mask, result)) @require_torch class TestAttentionImplementation(unittest.TestCase): def test_error_no_sdpa_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="sdpa") self.assertTrue( "does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention" in str(cm.exception) ) _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") def test_error_no_flash_available(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_no_flash_available_with_config(self): with self.assertRaises(ValueError) as cm: config = AutoConfig.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel") _ = AutoModel.from_pretrained( "hf-tiny-model-private/tiny-random-MCTCTModel", config=config, attn_implementation="flash_attention_2" ) self.assertTrue("does not support Flash Attention 2.0" in str(cm.exception)) def test_error_wrong_attn_implementation(self): with self.assertRaises(ValueError) as cm: _ = AutoModel.from_pretrained("hf-tiny-model-private/tiny-random-MCTCTModel", attn_implementation="foo") self.assertTrue('The only possible arguments are `attn_implementation="eager"' in str(cm.exception)) def test_not_available_flash(self): if is_flash_attn_2_available(): self.skipTest("Please uninstall flash-attn package to run test_not_available_flash") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( 
"hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="flash_attention_2" ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_flash_with_config(self): if is_flash_attn_2_available(): self.skipTest("Please uninstall flash-attn package to run test_not_available_flash") config = AutoConfig.from_pretrained("hf-internal-testing/tiny-random-GPTBigCodeModel") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", config=config, attn_implementation="flash_attention_2", ) self.assertTrue("the package flash_attn seems to be not installed" in str(cm.exception)) def test_not_available_sdpa(self): if is_torch_sdpa_available(): self.skipTest("This test requires torch<=2.0") with self.assertRaises(ImportError) as cm: _ = AutoModel.from_pretrained( "hf-internal-testing/tiny-random-GPTBigCodeModel", attn_implementation="sdpa" ) self.assertTrue("PyTorch SDPA requirements in Transformers are not met" in str(cm.exception)) @slow @require_torch_gpu class Mask4DTestBase(unittest.TestCase): def tearDown(self): gc.collect() torch.cuda.empty_cache() def get_test_data(self): texts = ["the cat sat", "the cat had", "the cat is"] encoded = [self.tokenizer.encode(t) for t in texts] input_0 = torch.tensor(encoded, device=torch_device) # tensor([[ 1, 278, 6635, 3290], # [ 1, 278, 6635, 750], # [ 1, 278, 6635, 338]], device='cuda:0') position_ids_0 = torch.tensor([[0, 1, 2, 3]] * 3, device=torch_device, dtype=torch.int64) # Combining common prefix with the unique ending tokens: input_1 = torch.cat([input_0[0][:-1], input_0[:, -1]]).unsqueeze(0) # tensor([[ 1, 278, 6635, 3290, 750, 338]], device='cuda:0') # Creating a 4D mask where each of the last 3 tokens do not attend to each other. mask_1 = torch.tensor( [ [ [ [1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 1], ] ] ], device="cuda:0", dtype=torch.int64, ) # Creating a position_ids tensor. note the repeating figures in the end. 
position_ids_1 = torch.tensor([[0, 1, 2, 3, 3, 3]], device=torch_device, dtype=torch.int64) return input_0, position_ids_0, input_1, mask_1, position_ids_1 @require_torch_gpu class Mask4DTestFP32(Mask4DTestBase): def setUp(self): model_name = "JackFram/llama-68m" # small Llama-like model from FlexFlow self.model_dtype = torch.float32 self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=self.model_dtype).to(torch_device) def test_attention(self): """comparing outputs of attention layer""" input_0, position_ids_0, input_1, mask_1, position_ids_1 = self.get_test_data() causal_mask_1 = (1 - mask_1).to(self.model_dtype) * torch.finfo(self.model_dtype).min hid_0 = self.model.model.embed_tokens(input_0) outs_0 = self.model.model.layers[0].self_attn.forward(hid_0, position_ids=position_ids_0)[0] # outs_0.shape == torch.Size([3, 4, 768]) hid_1 = self.model.model.embed_tokens(input_1) outs_1 = self.model.model.layers[0].self_attn.forward( hid_1, attention_mask=causal_mask_1, position_ids=position_ids_1 )[0] # outs_1.shape == torch.Size([1, 6, 768]) outs_0_last_tokens = outs_0[:, -1, :] # last tokens in each batch line outs_1_last_tokens = outs_1[0, -3:, :] # last three tokens torch.testing.assert_close(outs_0_last_tokens, outs_1_last_tokens) def test_causal_model_logits(self): """comparing logits outputs of whole inner model""" input_0, position_ids_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0, position_ids=position_ids_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens torch.testing.assert_close(logits_0_last_tokens, logits_1_last_tokens) @require_torch_gpu class Mask4DTestFP16(Mask4DTestBase): test_attention = Mask4DTestFP32.test_attention def setUp(self): model_name = "JackFram/llama-68m" # small Llama-like model from FlexFlow self.model_dtype = torch.float16 self.tokenizer = AutoTokenizer.from_pretrained(model_name) self.model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=self.model_dtype).to(torch_device) def test_causal_model_logits(self): """comparing logits outputs of whole inner model""" input_0, position_ids_0, input_1, mask_1, position_ids_1 = self.get_test_data() logits_0 = self.model.forward(input_0, position_ids=position_ids_0).logits logits_1 = self.model.forward(input_1, attention_mask=mask_1.bool(), position_ids=position_ids_1).logits logits_0_last_tokens = logits_0[:, -1, :] # last tokens in each batch line logits_1_last_tokens = logits_1[0, -3:, :] # last three tokens indices_0 = logits_0_last_tokens.sort(descending=True).indices indices_1 = logits_1_last_tokens.sort(descending=True).indices # checking logits, but note relaxed tolerances for FP16 torch.testing.assert_close(logits_0_last_tokens, logits_1_last_tokens, atol=0.02, rtol=0.001) # checking tokens order for the top tokens for token_ids_0, token_ids_1 in zip(indices_0, indices_1): self.assertTrue(torch.equal(token_ids_0[:128], token_ids_1[:128]))
transformers/tests/test_modeling_utils.py/0
{ "file_path": "transformers/tests/test_modeling_utils.py", "repo_id": "transformers", "token_count": 42980 }
381
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class TestActivations(unittest.TestCase): def test_gelu_versions(self): x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) torch_builtin = get_activation("gelu") self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x))) self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x))) def test_gelu_10(self): x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100]) torch_builtin = get_activation("gelu") gelu10 = get_activation("gelu_10") y_gelu = torch_builtin(x) y_gelu_10 = gelu10(x) clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0) self.assertTrue(torch.max(y_gelu_10).item() == 10.0) self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask)) def test_get_activation(self): get_activation("gelu") get_activation("gelu_10") get_activation("gelu_fast") get_activation("gelu_new") get_activation("gelu_python") get_activation("gelu_pytorch_tanh") get_activation("linear") get_activation("mish") get_activation("quick_gelu") get_activation("relu") get_activation("sigmoid") get_activation("silu") get_activation("swish") get_activation("tanh") with self.assertRaises(KeyError): get_activation("bogus") with self.assertRaises(KeyError): get_activation(None) def test_activations_are_distinct_objects(self): act1 = get_activation("gelu") act1.a = 1 act2 = get_activation("gelu") self.assertEqual(act1.a, 1) with self.assertRaises(AttributeError): _ = act2.a
transformers/tests/utils/test_activations.py/0
{ "file_path": "transformers/tests/utils/test_activations.py", "repo_id": "transformers", "token_count": 1061 }
382
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile import unittest from transformers.modelcard import ModelCard class ModelCardTester(unittest.TestCase): def setUp(self): self.inputs_dict = { "model_details": { "Organization": "testing", "Model date": "today", "Model version": "v2.1, Developed by Test Corp in 2019.", "Architecture": "Convolutional Neural Network.", }, "metrics": "BLEU and ROUGE-1", "evaluation_data": { "Datasets": {"BLEU": "My-great-dataset-v1", "ROUGE-1": "My-short-dataset-v2.1"}, "Preprocessing": "See details on https://arxiv.org/pdf/1810.03993.pdf", }, "training_data": { "Dataset": "English Wikipedia dump dated 2018-12-01", "Preprocessing": ( "Using SentencePiece vocabulary of size 52k tokens. See details on" " https://arxiv.org/pdf/1810.03993.pdf" ), }, "quantitative_analyses": {"BLEU": 55.1, "ROUGE-1": 76}, } def test_model_card_common_properties(self): modelcard = ModelCard.from_dict(self.inputs_dict) self.assertTrue(hasattr(modelcard, "model_details")) self.assertTrue(hasattr(modelcard, "intended_use")) self.assertTrue(hasattr(modelcard, "factors")) self.assertTrue(hasattr(modelcard, "metrics")) self.assertTrue(hasattr(modelcard, "evaluation_data")) self.assertTrue(hasattr(modelcard, "training_data")) self.assertTrue(hasattr(modelcard, "quantitative_analyses")) self.assertTrue(hasattr(modelcard, "ethical_considerations")) self.assertTrue(hasattr(modelcard, "caveats_and_recommendations")) def test_model_card_to_json_string(self): modelcard = ModelCard.from_dict(self.inputs_dict) obj = json.loads(modelcard.to_json_string()) for key, value in self.inputs_dict.items(): self.assertEqual(obj[key], value) def test_model_card_to_json_file(self): model_card_first = ModelCard.from_dict(self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: filename = os.path.join(tmpdirname, "modelcard.json") model_card_first.to_json_file(filename) model_card_second = ModelCard.from_json_file(filename) self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict()) def test_model_card_from_and_save_pretrained(self): model_card_first = ModelCard.from_dict(self.inputs_dict) with tempfile.TemporaryDirectory() as tmpdirname: model_card_first.save_pretrained(tmpdirname) model_card_second = ModelCard.from_pretrained(tmpdirname) self.assertEqual(model_card_second.to_dict(), model_card_first.to_dict())
transformers/tests/utils/test_model_card.py/0
{ "file_path": "transformers/tests/utils/test_model_card.py", "repo_id": "transformers", "token_count": 1475 }
383
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(".") r""" The argument `test_file` in this file refers to a model test file. This should be a string of the from `tests/models/*/test_modeling_*.py`. """ def get_module_path(test_file): """Return the module path of a model test file.""" components = test_file.split(os.path.sep) if components[0:2] != ["tests", "models"]: raise ValueError( "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got " f"{test_file} instead." ) test_fn = components[-1] if not test_fn.endswith("py"): raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.") if not test_fn.startswith("test_modeling_"): raise ValueError( f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." ) components = components[:-1] + [test_fn.replace(".py", "")] test_module_path = ".".join(components) return test_module_path def get_test_module(test_file): """Get the module of a model test file.""" test_module_path = get_module_path(test_file) test_module = importlib.import_module(test_module_path) return test_module def get_tester_classes(test_file): """Get all classes in a model test file whose names ends with `ModelTester`.""" tester_classes = [] test_module = get_test_module(test_file) for attr in dir(test_module): if attr.endswith("ModelTester"): tester_classes.append(getattr(test_module, attr)) # sort with class names return sorted(tester_classes, key=lambda x: x.__name__) def get_test_classes(test_file): """Get all [test] classes in a model test file with attribute `all_model_classes` that are non-empty. These are usually the (model) test classes containing the (non-slow) tests to run and are subclasses of one of the classes `ModelTesterMixin`, `TFModelTesterMixin` or `FlaxModelTesterMixin`, as well as a subclass of `unittest.TestCase`. Exceptions include `RagTestMixin` (and its subclasses). """ test_classes = [] test_module = get_test_module(test_file) for attr in dir(test_module): attr_value = getattr(test_module, attr) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). 
model_classes = getattr(attr_value, "all_model_classes", []) if len(model_classes) > 0: test_classes.append(attr_value) # sort with class names return sorted(test_classes, key=lambda x: x.__name__) def get_model_classes(test_file): """Get all model classes that appear in `all_model_classes` attributes in a model test file.""" test_classes = get_test_classes(test_file) model_classes = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes) # sort with class names return sorted(model_classes, key=lambda x: x.__name__) def get_model_tester_from_test_class(test_class): """Get the model tester class of a model test class.""" test = test_class() if hasattr(test, "setUp"): test.setUp() model_tester = None if hasattr(test, "model_tester"): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. if test.model_tester is not None: model_tester = test.model_tester.__class__ return model_tester def get_test_classes_for_model(test_file, model_class): """Get all [test] classes in `test_file` that have `model_class` in their `all_model_classes`.""" test_classes = get_test_classes(test_file) target_test_classes = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(test_class) # sort with class names return sorted(target_test_classes, key=lambda x: x.__name__) def get_tester_classes_for_model(test_file, model_class): """Get all model tester classes in `test_file` that are associated to `model_class`.""" test_classes = get_test_classes_for_model(test_file, model_class) tester_classes = [] for test_class in test_classes: tester_class = get_model_tester_from_test_class(test_class) if tester_class is not None: tester_classes.append(tester_class) # sort with class names return sorted(tester_classes, key=lambda x: x.__name__) def get_test_to_tester_mapping(test_file): """Get a mapping from [test] classes to model tester classes in `test_file`. This uses `get_test_classes` which may return classes that are NOT subclasses of `unittest.TestCase`. """ test_classes = get_test_classes(test_file) test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes} return test_tester_mapping def get_model_to_test_mapping(test_file): """Get a mapping from model classes to test classes in `test_file`.""" model_classes = get_model_classes(test_file) model_test_mapping = { model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes } return model_test_mapping def get_model_to_tester_mapping(test_file): """Get a mapping from model classes to model tester classes in `test_file`.""" model_classes = get_model_classes(test_file) model_to_tester_mapping = { model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes } return model_to_tester_mapping def to_json(o): """Make the information succinct and easy to read. Avoid the full class representation like `<class 'transformers.models.bert.modeling_bert.BertForMaskedLM'>` when displaying the results. Instead, we use class name (`BertForMaskedLM`) for the readability. """ if isinstance(o, str): return o elif isinstance(o, type): return o.__name__ elif isinstance(o, (list, tuple)): return [to_json(x) for x in o] elif isinstance(o, dict): return {to_json(k): to_json(v) for k, v in o.items()} else: return o
transformers/utils/get_test_info.py/0
{ "file_path": "transformers/utils/get_test_info.py", "repo_id": "transformers", "token_count": 2577 }
384
from transformers import ProcessorMixin class CustomProcessor(ProcessorMixin): feature_extractor_class = "AutoFeatureExtractor" tokenizer_class = "AutoTokenizer"
transformers/utils/test_module/custom_processing.py/0
{ "file_path": "transformers/utils/test_module/custom_processing.py", "repo_id": "transformers", "token_count": 51 }
385
cff-version: 1.2.0 title: 'TRL: Transformer Reinforcement Learning' message: >- If you use this software, please cite it using the metadata from this file. type: software authors: - given-names: Leandro family-names: von Werra - given-names: Younes family-names: Belkada - given-names: Lewis family-names: Tunstall - given-names: Edward family-names: Beeching - given-names: Tristan family-names: Thrush - given-names: Nathan family-names: Lambert repository-code: 'https://github.com/huggingface/trl' abstract: "With trl you can train transformer language models with Proximal Policy Optimization (PPO). The library is built on top of the transformers library by \U0001F917 Hugging Face. Therefore, pre-trained language models can be directly loaded via transformers. At this point, most decoder and encoder-decoder architectures are supported." keywords: - rlhf - deep-learning - pytorch - transformers license: Apache-2.0 version: 0.2.1
trl/CITATION.cff/0
{ "file_path": "trl/CITATION.cff", "repo_id": "trl", "token_count": 313 }
386
BENCHMARK_SCRIPT="benchmark/benchmark_level1.sh" \ BENCHMARK_PLOT_SCRIPT="benchmark/benchmark_level1_plot.sh" \ bash benchmark/benchmark_and_report.sh
trl/benchmark/regression_test.sh/0
{ "file_path": "trl/benchmark/regression_test.sh", "repo_id": "trl", "token_count": 60 }
387
# Installation

You can install TRL either from PyPI or from source:

## PyPI

Install the library with pip:

```bash
pip install trl
```

## Source

You can also install the latest version from source. First clone the repo and then run the installation with `pip`:

```bash
git clone https://github.com/huggingface/trl.git
cd trl/
pip install -e .
```

If you want the development install, you can replace the pip install above with the following:

```bash
pip install -e ".[dev]"
```
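To quickly check that the installation worked, you can import the package and print its version (a minimal sanity check; the exact version string you see will depend on which release or commit you installed):

```python
import trl

print(trl.__version__)
```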
trl/docs/source/installation.mdx/0
{ "file_path": "trl/docs/source/installation.mdx", "repo_id": "trl", "token_count": 147 }
388
# Using LLaMA models with TRL

We've begun rolling out examples to use Meta's LLaMA models in `trl` (see [Meta's LLaMA release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) for the original LLaMA model).

## Efficient training strategies

Even training the smallest LLaMA model requires an enormous amount of memory. Some quick math: in bf16, every parameter uses 2 bytes (in fp32, 4 bytes) in addition to the 8 bytes used, e.g., by the Adam optimizer (see the [performance docs](https://huggingface.co/docs/transformers/perf_train_gpu_one#optimizer) in Transformers for more info). So a 7B parameter model would use `(2+8)*7B=70GB` just to fit in memory and would likely need more when you compute intermediate values such as attention scores. So you couldn't train the model even on a single 80GB A100 like that. You can use some tricks, like more efficient optimizers or half-precision training, to squeeze a bit more into memory, but you'll run out sooner or later.

Another option is to use Parameter-Efficient Fine-Tuning (PEFT) techniques, such as the [`peft`](https://github.com/huggingface/peft) library, which can perform low-rank adaptation (LoRA) on a model loaded in 8-bit. For more on `peft` + `trl`, see the [docs](https://huggingface.co/docs/trl/sentiment_tuning_peft).

Loading the model in 8-bit reduces the memory footprint drastically since you only need one byte per parameter for the weights (e.g. a 7B LLaMA model is 7GB in memory). Instead of training the original weights directly, LoRA adds small adapter layers on top of some specific layers (usually the attention layers); thus, the number of trainable parameters is drastically reduced.

In this scenario, a rule of thumb is to allocate ~1.2-1.4GB per billion parameters (depending on the batch size and sequence length) to fit the entire fine-tuning setup. This enables fine-tuning larger models (up to 50-60B scale models on an NVIDIA A100 80GB) at low cost.

Now we can fit very large models into a single GPU, but the training might still be very slow. The simplest strategy in this scenario is data parallelism: we replicate the same training setup on separate GPUs and pass different batches to each GPU. With this, you can parallelize the forward/backward passes of the model and scale with the number of GPUs.

![chapter10_ddp.png](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/blog/stackllama/chapter10_ddp.png)

We use either the `transformers.Trainer` or `accelerate`, which both support data parallelism without any code changes, by simply passing arguments when calling the scripts with `torchrun` or `accelerate launch`. The following runs a training script with 8 GPUs on a single machine with `accelerate` and `torchrun`, respectively:

```bash
accelerate launch --multi_gpu --num_machines 1  --num_processes 8 my_accelerate_script.py
torchrun --nnodes 1  --nproc_per_node 8 my_torch_script.py
```
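To recap this section's memory arithmetic, here is a small, purely illustrative helper (not a TRL API; the function name is ours, the numbers are the rule-of-thumb figures quoted above, and real usage also depends on activations, batch size and sequence length):

```python
def estimate_finetune_memory_gb(num_params_billion: float, eight_bit_lora: bool = False):
    """Back-of-the-envelope GPU memory estimate (in GB) for fine-tuning."""
    if eight_bit_lora:
        # rule of thumb quoted above: ~1.2-1.4 GB per billion parameters
        return (1.2 * num_params_billion, 1.4 * num_params_billion)
    # full fine-tuning in bf16 with Adam: 2 bytes (weights) + ~8 bytes (optimizer state) per parameter
    total = (2 + 8) * num_params_billion
    return (total, total)


print(estimate_finetune_memory_gb(7))                       # (70.0, 70.0) -> the 70GB figure above
print(estimate_finetune_memory_gb(7, eight_bit_lora=True))  # roughly (8.4, 9.8)
```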
## Supervised fine-tuning

Before we start training reward models and tuning our model with RL, it helps if the model is already good in the domain we are interested in. In our case, we want it to answer questions, while for other use cases, we might want it to follow instructions, in which case instruction tuning is a great idea. The easiest way to achieve this is by continuing to train the language model with the language modeling objective on texts from the domain or task.

The [StackExchange dataset](https://huggingface.co/datasets/HuggingFaceH4/stack-exchange-preferences) is enormous (over 10 million instructions), so we can easily train the language model on a subset of it.

There is nothing special about fine-tuning the model before doing RLHF - it's just the causal language modeling objective from pretraining that we apply here. To use the data efficiently, we use a technique called packing: instead of having one text per sample in the batch and then padding to either the longest text or the maximal context of the model, we concatenate a lot of texts with an EOS token in between and cut chunks of the context size to fill the batch without any padding.

![chapter10_preprocessing-clm.png](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/blog/stackllama/chapter10_preprocessing-clm.png)

With this approach the training is much more efficient, as each token passed through the model is also trained, in contrast to padding tokens, which are usually masked from the loss. If you don't have much data and are more concerned about occasionally cutting off some tokens that overflow the context, you can also use a classical data loader.

The packing is handled by the `ConstantLengthDataset`, and we can then use the `Trainer` after loading the model with `peft`. First, we load the model in int8, prepare it for training, and then add the LoRA adapters.

```python
# load model in 8bit
model = AutoModelForCausalLM.from_pretrained(
    args.model_path,
    load_in_8bit=True,
    device_map={"": Accelerator().local_process_index}
)
model = prepare_model_for_kbit_training(model)

# add LoRA to model
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
```

We train the model for a few thousand steps with the causal language modeling objective and save the model. Since we will tune the model again with different objectives, we merge the adapter weights with the original model weights.
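As a minimal sketch of that merge step (the checkpoint paths below are placeholders, and this assumes a LoRA adapter as configured above; the `merge_peft_adapter.py` script shipped with the StackLLaMA example does essentially the same thing):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

# placeholder paths: the fine-tuned base model and the trained LoRA adapter
base_model = AutoModelForCausalLM.from_pretrained("path/to/llama-7b", torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base_model, "path/to/llama-7b-se-adapter")

# fold the LoRA weights into the base weights and drop the adapter modules
merged_model = model.merge_and_unload()
merged_model.save_pretrained("path/to/llama-7b-se")
```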
**Disclaimer:** due to LLaMA's license, we release only the adapter weights for this model and for the checkpoints in the following sections. You can apply for access to the base model's weights by filling out Meta AI's [form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform) and then converting them to the 🤗 Transformers format by running this [script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). Note that you'll also need to install 🤗 Transformers from source until `v4.28` is released.

Now that we have fine-tuned the model for the task, we are ready to train a reward model.

## Reward modeling and human preferences

In principle, we could fine-tune the model using RLHF directly with the human annotations. However, this would require us to send some samples to humans for rating after each optimization iteration. This is expensive and slow due to the number of training samples needed for convergence and the inherent latency of human reading and annotator speed.

A trick that works well instead of direct feedback is training a reward model on human annotations collected before the RL loop. The goal of the reward model is to imitate how a human would rate a text. There are several possible strategies to build a reward model: the most straightforward way would be to predict the annotation (e.g. a rating score or a binary value for "good"/"bad"). In practice, what works better is to predict the ranking of two examples, where the reward model is presented with two candidates `(y_k, y_j)` for a given prompt `x` and has to predict which one would be rated higher by a human annotator.

With the StackExchange dataset, we can infer which of the two answers was preferred by the users based on the score. With that information and a pairwise ranking loss, we can then modify the `transformers.Trainer` by adding a custom loss function.

```python
class RewardTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        rewards_j = model(input_ids=inputs["input_ids_j"], attention_mask=inputs["attention_mask_j"])[0]
        rewards_k = model(input_ids=inputs["input_ids_k"], attention_mask=inputs["attention_mask_k"])[0]
        loss = -nn.functional.logsigmoid(rewards_j - rewards_k).mean()
        if return_outputs:
            return loss, {"rewards_j": rewards_j, "rewards_k": rewards_k}
        return loss
```

We use a subset of 100,000 pairs of candidates and evaluate on a held-out set of 50,000. With a modest training batch size of 4, we train the LLaMA model using the LoRA `peft` adapter for a single epoch using the Adam optimizer with BF16 precision. Our LoRA configuration is:

```python
peft_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    inference_mode=False,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
)
```

As detailed in the next section, the resulting adapter can be merged into the frozen model and saved for further downstream use.

## Reinforcement Learning from Human Feedback

With the fine-tuned language model and the reward model at hand, we are now ready to run the RL loop. It follows roughly three steps:

1. Generate responses from prompts,
2. Rate the responses with the reward model,
3. Run a reinforcement learning policy-optimization step with the ratings.

The Query and Response prompts are templated as follows before being tokenized and passed to the model:

```bash
Question: <Query>
Answer: <Response>
```

The same template was used for the SFT, RM and RLHF stages.

Once more, we utilize `peft` for memory-efficient training, which offers an extra advantage in the RLHF context. Here, the reference model and policy share the same base, the SFT model, which we load in 8-bit and freeze during training. We exclusively optimize the policy's LoRA weights using PPO while sharing the base model's weights.

```python
for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)):
    question_tensors = batch["input_ids"]

    # sample from the policy to generate responses
    response_tensors = ppo_trainer.generate(
        question_tensors,
        return_prompt=False,
        length_sampler=output_length_sampler,
        **generation_kwargs,
    )
    batch["response"] = tokenizer.batch_decode(response_tensors, skip_special_tokens=True)

    # Compute sentiment score
    texts = [q + r for q, r in zip(batch["query"], batch["response"])]
    pipe_outputs = sentiment_pipe(texts, **sent_kwargs)
    rewards = [torch.tensor(output[0]["score"] - script_args.reward_baseline) for output in pipe_outputs]

    # Run PPO step
    stats = ppo_trainer.step(question_tensors, response_tensors, rewards)
    # Log stats to Wandb
    ppo_trainer.log_stats(stats, batch, rewards)
```

For the rest of the details and evaluation, please refer to our [blog post on StackLLaMA](https://huggingface.co/blog/stackllama).
trl/docs/source/using_llama_models.mdx/0
{ "file_path": "trl/docs/source/using_llama_models.mdx", "repo_id": "trl", "token_count": 2999 }
389
# RLHF pipeline for the creation of StackLLaMa: a Stack Exchange llama-7b model

There were three main steps to the training process:

1. Supervised fine-tuning of the base llama-7b model to create llama-7b-se:
    - `torchrun --nnodes 1  --nproc_per_node 8 examples/research_projects/stack_llama/scripts/supervised_finetuning.py --model_path=<LLAMA_MODEL_PATH> --streaming --learning_rate 1e-5 --max_steps 5000 --output_dir ./llama-se`
2. Reward modeling using dialog pairs from the SE dataset using the llama-7b-se to create llama-7b-se-rm:
    - `torchrun --nnodes 1  --nproc_per_node 8 examples/research_projects/stack_llama/scripts/reward_modeling.py --model_name=<LLAMA_SE_MODEL>`
3. RL fine-tuning of llama-7b-se with the llama-7b-se-rm reward model:
    - `accelerate launch --multi_gpu --num_machines 1  --num_processes 8 examples/research_projects/stack_llama/scripts/rl_training.py --log_with=wandb --model_name=<LLAMA_SE_MODEL> --reward_model_name=<LLAMA_SE_RM_MODEL> --adafactor=False --tokenizer_name=<LLAMA_TOKENIZER> --save_freq=100 --output_max_length=128 --batch_size=8 --gradient_accumulation_steps=8 --batched_gen=True --ppo_epochs=4 --seed=0 --learning_rate=1.4e-5 --early_stopping=True --output_dir=llama-se-rl-finetune-128-8-8-1.4e-5_adam`

LoRA layers were used at all stages to reduce memory requirements. At each stage the peft adapter layers were merged with the base model, using:

```shell
python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --adapter_model_name=XXX --base_model_name=YYY --output_name=ZZZ
```

Note that this script requires `peft>=0.3.0`.

For access to the base llama-7b model, please see Meta's [release](https://ai.facebook.com/blog/large-language-model-llama-meta-ai/) and [request form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform).
trl/examples/research_projects/stack_llama/scripts/README.md/0
{ "file_path": "trl/examples/research_projects/stack_llama/scripts/README.md", "repo_id": "trl", "token_count": 696 }
390
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # regular: python examples/scripts/dpo.py \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 1e-3 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="dpo_anthropic_hh" \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns # peft: python examples/scripts/dpo.py \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 1e-3 \ --gradient_accumulation_steps 1 \ --logging_steps 10 \ --eval_steps 500 \ --output_dir="dpo_anthropic_hh" \ --optim rmsprop \ --warmup_steps 150 \ --report_to wandb \ --bf16 \ --logging_first_step \ --no_remove_unused_columns \ --use_peft \ --lora_r=16 \ --lora_alpha=16 """ from dataclasses import dataclass, field from typing import Dict, Optional import torch from datasets import Dataset, load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments from trl import DPOTrainer, ModelConfig, get_kbit_device_map, get_peft_config, get_quantization_config @dataclass class ScriptArguments: beta: float = field(default=0.1, metadata={"help": "the beta parameter for DPO loss"}) max_length: int = field(default=512, metadata={"help": "max length of each sample"}) max_prompt_length: int = field(default=128, metadata={"help": "max length of each sample's prompt"}) max_target_length: int = field( default=128, metadata={"help": "Only used for encoder decoder model. Max target of each sample's prompt"} ) sanity_check: bool = field(default=True, metadata={"help": "only train on 1000 samples"}) ignore_bias_buffers: bool = field( default=False, metadata={ "help": "debug argument for distributed training;" "fix for DDP issues with LM bias/mask buffers - invalid scalar type,`inplace operation. See" "https://github.com/huggingface/transformers/issues/22482#issuecomment-1595790992" }, ) generate_during_eval: bool = field(default=False, metadata={"help": "Generate during evaluation"}) def extract_anthropic_prompt(prompt_and_response): """Extract the anthropic prompt from a prompt and response pair.""" search_term = "\n\nAssistant:" search_term_idx = prompt_and_response.rfind(search_term) assert search_term_idx != -1, f"Prompt and response does not contain '{search_term}'" return prompt_and_response[: search_term_idx + len(search_term)] def get_hh(split: str, sanity_check: bool = False, silent: bool = False, cache_dir: Optional[str] = None) -> Dataset: """Load the Anthropic Helpful-Harmless dataset from Hugging Face and convert it to the necessary format. The dataset is converted to a dictionary with the following structure: { 'prompt': List[str], 'chosen': List[str], 'rejected': List[str], } Prompts should be structured as follows: \n\nHuman: <prompt>\n\nAssistant: Multiple turns are allowed, but the prompt should always start with \n\nHuman: and end with \n\nAssistant:. 
""" dataset = load_dataset("Anthropic/hh-rlhf", split=split, cache_dir=cache_dir) if sanity_check: dataset = dataset.select(range(min(len(dataset), 1000))) def split_prompt_and_responses(sample) -> Dict[str, str]: prompt = extract_anthropic_prompt(sample["chosen"]) return { "prompt": prompt, "chosen": sample["chosen"][len(prompt) :], "rejected": sample["rejected"][len(prompt) :], } return dataset.map(split_prompt_and_responses) if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, TrainingArguments, ModelConfig)) args, training_args, model_config = parser.parse_args_into_dataclasses() ################ # Model & Tokenizer ################ torch_dtype = ( model_config.torch_dtype if model_config.torch_dtype in ["auto", None] else getattr(torch, model_config.torch_dtype) ) quantization_config = get_quantization_config(model_config) model_kwargs = dict( revision=model_config.model_revision, trust_remote_code=model_config.trust_remote_code, attn_implementation=model_config.attn_implementation, torch_dtype=torch_dtype, use_cache=False if training_args.gradient_checkpointing else True, device_map=get_kbit_device_map() if quantization_config is not None else None, quantization_config=quantization_config, ) model = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs) peft_config = get_peft_config(model_config) if peft_config is None: model_ref = AutoModelForCausalLM.from_pretrained(model_config.model_name_or_path, **model_kwargs) else: model_ref = None tokenizer = AutoTokenizer.from_pretrained(model_config.model_name_or_path) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token if args.ignore_bias_buffers: # torch distributed hack model._ddp_params_and_buffers_to_ignore = [ name for name, buffer in model.named_buffers() if buffer.dtype == torch.bool ] ################ # Dataset ################ train_dataset = get_hh("train", sanity_check=args.sanity_check) eval_dataset = get_hh("test", sanity_check=args.sanity_check) ################ # Training ################ trainer = DPOTrainer( model, model_ref, args=training_args, beta=args.beta, train_dataset=train_dataset, eval_dataset=eval_dataset, tokenizer=tokenizer, max_length=args.max_length, max_target_length=args.max_target_length, max_prompt_length=args.max_prompt_length, generate_during_eval=args.generate_during_eval, peft_config=get_peft_config(model_config), ) trainer.train() trainer.save_model(training_args.output_dir)
trl/examples/scripts/dpo.py/0
{ "file_path": "trl/examples/scripts/dpo.py", "repo_id": "trl", "token_count": 2622 }
391
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import itertools import tempfile import unittest import torch from accelerate.utils.memory import release_memory from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, TrainingArguments from trl import SFTTrainer, is_peft_available from trl.models.utils import setup_chat_format from ..testing_utils import require_bitsandbytes, require_peft, require_torch_gpu, require_torch_multi_gpu from .testing_constants import DEVICE_MAP_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST, PACKING_OPTIONS if is_peft_available(): from peft import LoraConfig, PeftModel @require_torch_gpu class SFTTrainerSlowTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.train_dataset = load_dataset("imdb", split="train[:10%]") cls.eval_dataset = load_dataset("imdb", split="test[:10%]") cls.dataset_text_field = "text" cls.max_seq_length = 128 cls.peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=8, bias="none", task_type="CAUSAL_LM", ) def tearDown(self): gc.collect() torch.cuda.empty_cache() gc.collect() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_str(self, model_name, packing): """ Simply tests if passing a simple str to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, ) trainer = SFTTrainer( model_name, args=args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft def test_sft_trainer_peft(self, model_name, packing): """ Simply tests if passing a transformers model + peft config to `SFTTrainer` loads and runs the trainer as expected. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers_mp(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand( list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, DEVICE_MAP_OPTIONS)) ) @require_torch_multi_gpu def test_sft_trainer_transformers_mp_gc_device_map( self, model_name, packing, gradient_checkpointing_kwargs, device_map ): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing (single, multi-gpu, etc). """ with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft @require_bitsandbytes def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT + bnb to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. 
""" with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, packing=packing, dataset_text_field=self.dataset_text_field, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft @require_bitsandbytes def test_sft_trainer_with_chat_format_qlora(self, model_name, packing): """ Simply tests if using setup_chat_format with a transformers model + peft + bnb config to `SFTTrainer` loads and runs the trainer as expected. """ with tempfile.TemporaryDirectory() as tmp_dir: train_dataset = load_dataset("trl-internal-testing/dolly-chatml-sft", split="train") args = TrainingArguments( output_dir=tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) model, tokenizer = setup_chat_format(model, tokenizer) trainer = SFTTrainer( model, args=args, tokenizer=tokenizer, train_dataset=train_dataset, packing=packing, max_seq_length=self.max_seq_length, peft_config=self.peft_config, ) assert isinstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer)
trl/tests/slow/test_sft_slow.py/0
{ "file_path": "trl/tests/slow/test_sft_slow.py", "repo_id": "trl", "token_count": 7186 }
392
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest import torch from datasets import Dataset from transformers import AutoModelForSequenceClassification, AutoTokenizer, EvalPrediction from trl import RewardConfig, RewardTrainer from trl.trainer import compute_accuracy from .testing_utils import require_peft class RewardTrainerTester(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_id = "trl-internal-testing/dummy-GPT2-correct-vocab" cls.model = AutoModelForSequenceClassification.from_pretrained(cls.model_id) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_id) cls.tokenizer.pad_token = cls.tokenizer.eos_token def test_accuracy_metrics(self): dummy_eval_predictions = EvalPrediction(torch.FloatTensor([[0.1, 0.9], [0.9, 0.1]]), torch.LongTensor([0, 0])) accuracy = compute_accuracy(dummy_eval_predictions) assert accuracy["accuracy"] == 0.5 def test_reward_trainer(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = RewardConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, evaluation_strategy="steps", ) # fmt: off dummy_dataset_dict = { "input_ids_chosen": [ torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), ], "attention_mask_chosen": [ torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), ], "input_ids_rejected": [ torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), ], "attention_mask_rejected": [ torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 0]), torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 1]), ], } # fmt: on dummy_dataset = Dataset.from_dict(dummy_dataset_dict) trainer = RewardTrainer( model=self.model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None # check the params have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) # check the params have changed - ignore 0 biases if param.sum() != 0: assert not torch.equal(param, new_param) preds = trainer.predict(dummy_dataset) assert preds.predictions.shape == (4, 2) @require_peft def test_reward_trainer_peft(self): import peft from peft import LoraConfig, TaskType peft_version = peft.__version__ peft_config = LoraConfig( task_type=TaskType.SEQ_CLS, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1, ) with tempfile.TemporaryDirectory() as tmp_dir: training_args = RewardConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=6, remove_unused_columns=False, gradient_accumulation_steps=2, learning_rate=9e-1, evaluation_strategy="steps", ) # fmt: 
off dummy_dataset_dict = { "input_ids_chosen": [ torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), ], "attention_mask_chosen": [ torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), ], "input_ids_rejected": [ torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), ], "attention_mask_rejected": [ torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 0]), torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 1]), ], } # fmt: on dummy_dataset = Dataset.from_dict(dummy_dataset_dict) trainer = RewardTrainer( model=self.model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, peft_config=peft_config, ) previous_trainable_params = {} previous_non_trainable_params = {} # due to a change in the way the modules to save are dealt in PEFT. trainable_params_name = ["lora", "score"] if peft_version < "0.3.0" else ["lora", "modules_to_save"] # check gradients are not None for n, param in trainer.model.named_parameters(): if any(t in n for t in trainable_params_name): previous_trainable_params[n] = param.clone() else: previous_non_trainable_params[n] = param.clone() trainer.train() assert trainer.state.log_history[(-1)]["train_loss"] is not None # check the params have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) assert not torch.allclose(param, new_param, atol=1e-12, rtol=1e-12) # check the non trainable params have not changed for n, param in previous_non_trainable_params.items(): new_param = trainer.model.get_parameter(n) assert torch.allclose(param, new_param, atol=1e-12, rtol=1e-12) preds = trainer.predict(dummy_dataset) assert preds.predictions.shape == (4, 2) def test_reward_trainer_assert_value_error(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = RewardConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=1, remove_unused_columns=False, ) # fmt: off dummy_dataset_dict = { "input_ids_b": [ torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), ], "attention_mask_c": [ torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), ], "input_ids_f": [ torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), ], "attention_mask_g": [ torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 0]), torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 1]), ], } # fmt: on dummy_dataset = Dataset.from_dict(dummy_dataset_dict) trainer = RewardTrainer( model=self.model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, ) with pytest.raises(ValueError): trainer.train() training_args = RewardConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=1, remove_unused_columns=True, ) with self.assertWarns(UserWarning): trainer = RewardTrainer( model=self.model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, ) def test_reward_trainer_margin(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = RewardConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, evaluation_strategy="steps", ) # fmt: off dummy_dataset_dict = { "input_ids_chosen": [ torch.LongTensor([0, 1, 2]), ], "attention_mask_chosen": [ 
torch.LongTensor([1, 1, 1]), ], "input_ids_rejected": [ torch.LongTensor([0, 2]), ], "attention_mask_rejected": [ torch.LongTensor([1, 1]), ], "margin": [ torch.FloatTensor([1.0]), ] } # fmt: on dummy_dataset = Dataset.from_dict(dummy_dataset_dict) trainer = RewardTrainer( model=self.model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) batch = [dummy_dataset[0]] batch = trainer.data_collator(batch) loss, outputs = trainer.compute_loss(trainer.model, batch, return_outputs=True) l_val = -torch.nn.functional.logsigmoid( outputs["rewards_chosen"] - outputs["rewards_rejected"] - batch["margin"] ).mean() assert abs(loss - l_val) < 1e-6 def test_reward_trainer_tags(self): with tempfile.TemporaryDirectory() as tmp_dir: training_args = RewardConfig( output_dir=tmp_dir, per_device_train_batch_size=2, max_steps=3, remove_unused_columns=False, gradient_accumulation_steps=4, learning_rate=9e-1, evaluation_strategy="steps", ) # fmt: off dummy_dataset_dict = { "input_ids_chosen": [ torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), torch.LongTensor([0, 1, 2]), torch.LongTensor([1, 2]), ], "attention_mask_chosen": [ torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), torch.LongTensor([1, 1, 1]), torch.LongTensor([1, 0]), ], "input_ids_rejected": [ torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), torch.LongTensor([0, 2]), torch.LongTensor([1, 2, 0]), ], "attention_mask_rejected": [ torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 0]), torch.LongTensor([1, 1]), torch.LongTensor([1, 1, 1]), ], } # fmt: on dummy_dataset = Dataset.from_dict(dummy_dataset_dict) trainer = RewardTrainer( model=self.model, args=training_args, tokenizer=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) assert trainer.model.model_tags == trainer._tag_names
trl/tests/test_reward_trainer.py/0
{ "file_path": "trl/tests/test_reward_trainer.py", "repo_id": "trl", "token_count": 7653 }
393
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ State dict utilities: utility methods for converting state dicts easily File copied from diffusers to avoid import issues and make TRL compatible with most of diffusers versions. """ import enum class StateDictType(enum.Enum): """ The mode to use when converting state dicts. """ DIFFUSERS_OLD = "diffusers_old" PEFT = "peft" PEFT_TO_DIFFUSERS = { ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", "to_k.lora_A": "to_k.lora.down", "to_k.lora_B": "to_k.lora.up", "to_q.lora_A": "to_q.lora.down", "to_q.lora_B": "to_q.lora.up", "to_v.lora_A": "to_v.lora.down", "to_v.lora_B": "to_v.lora.up", "to_out.0.lora_A": "to_out.0.lora.down", "to_out.0.lora_B": "to_out.0.lora.up", } DIFFUSERS_OLD_TO_DIFFUSERS = { ".to_q_lora.up": ".q_proj.lora_linear_layer.up", ".to_q_lora.down": ".q_proj.lora_linear_layer.down", ".to_k_lora.up": ".k_proj.lora_linear_layer.up", ".to_k_lora.down": ".k_proj.lora_linear_layer.down", ".to_v_lora.up": ".v_proj.lora_linear_layer.up", ".to_v_lora.down": ".v_proj.lora_linear_layer.down", ".to_out_lora.up": ".out_proj.lora_linear_layer.up", ".to_out_lora.down": ".out_proj.lora_linear_layer.down", } DIFFUSERS_STATE_DICT_MAPPINGS = { StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, StateDictType.PEFT: PEFT_TO_DIFFUSERS, } KEYS_TO_ALWAYS_REPLACE = { ".processor.": ".", } def convert_state_dict(state_dict, mapping): r""" Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. mapping (`dict[str, str]`): The mapping to use for conversion, the mapping should be a dictionary with the following structure: - key: the pattern to replace - value: the pattern to replace with Returns: converted_state_dict (`dict`) The converted state dict. """ converted_state_dict = {} for k, v in state_dict.items(): # First, filter out the keys that we always want to replace for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): if pattern in k: new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] k = k.replace(pattern, new_pattern) for pattern in mapping.keys(): if pattern in k: new_pattern = mapping[pattern] k = k.replace(pattern, new_pattern) break converted_state_dict[k] = v return converted_state_dict def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): r""" Converts a state dict to new diffusers format. The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will return the state dict as is. 
The method only supports the conversion from diffusers old, PEFT to diffusers new for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. kwargs (`dict`, *args*): Additional arguments to pass to the method. - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in `get_peft_model_state_dict` method: https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 but we add it here in case we don't want to rely on that method. """ peft_adapter_name = kwargs.pop("adapter_name", None) if peft_adapter_name is not None: peft_adapter_name = "." + peft_adapter_name else: peft_adapter_name = "" if original_type is None: # Old diffusers to PEFT if any("to_out_lora" in k for k in state_dict.keys()): original_type = StateDictType.DIFFUSERS_OLD elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): original_type = StateDictType.PEFT elif any("lora_linear_layer" in k for k in state_dict.keys()): # nothing to do return state_dict else: raise ValueError("Could not automatically infer state dict type") if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): raise ValueError(f"Original type {original_type} is not supported") mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] return convert_state_dict(state_dict, mapping)
trl/trl/models/sd_utils.py/0
{ "file_path": "trl/trl/models/sd_utils.py", "repo_id": "trl", "token_count": 2507 }
394
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import warnings from collections import deque from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import torch from accelerate import PartialState from torch.nn.utils.rnn import pad_sequence from torch.utils.data import IterableDataset from transformers import BitsAndBytesConfig, DataCollatorForLanguageModeling, PreTrainedTokenizerBase from ..import_utils import is_peft_available, is_unsloth_available, is_xpu_available from ..trainer.model_config import ModelConfig if is_peft_available(): from peft import LoraConfig, PeftConfig class AdaptiveKLController: """ Adaptive KL controller described in the paper: https://arxiv.org/pdf/1909.08593.pdf """ def __init__(self, init_kl_coef, target, horizon): self.value = init_kl_coef self.target = target self.horizon = horizon def update(self, current, n_steps): target = self.target proportional_error = np.clip(current / target - 1, -0.2, 0.2) mult = 1 + proportional_error * n_steps / self.horizon self.value *= mult class FixedKLController: """Fixed KL controller.""" def __init__(self, kl_coef): self.value = kl_coef def update(self, current, n_steps): pass class DataCollatorForCompletionOnlyLM(DataCollatorForLanguageModeling): """ Data collator used for completion tasks. It ensures that all the tokens of the labels are set to an 'ignore_index' when they do not come from the assistant. This ensure that the loss is only calculated on the completion made by the assistant. Args: response_template (`Union[str, List[int]]`): the template form that indicates the start of the response, typically something like '### Response:\n'. It can also be passed as tokenized ids, which can be useful when using a tokenizer that encodes the response differently if it does not have proper context. instruction_template (`Union[str, List[int]]`): the template form that indicates the start of the human instruction, typically something like '### Human:\n'. Useful for assistant-style conversation datasets. It can also be passed as tokenized ids. mlm (`bool`, *optional*, defaults to `False`): Whether or not to use masked language modeling in the underlying `DataCollatorForLanguageModeling` class. Note that this option currently has no effect but is present for flexibility and backwards-compatibility. 
ignore_index (`int`, *optional*, defaults to `-100`): The index to use to ignore the initial tokens with """ def __init__( self, response_template: Union[str, List[int]], instruction_template: Optional[Union[str, List[int]]] = None, *args, mlm: bool = False, ignore_index: int = -100, **kwargs, ): super().__init__(*args, mlm=mlm, **kwargs) self.instruction_template = instruction_template if isinstance(instruction_template, str): # The user provides a string, must tokenize self.instruction_token_ids = self.tokenizer.encode(self.instruction_template, add_special_tokens=False) else: # The user already provides the token ids self.instruction_token_ids = instruction_template self.response_template = response_template if isinstance(response_template, str): # The user provides a string, must tokenize self.response_token_ids = self.tokenizer.encode(self.response_template, add_special_tokens=False) else: # The user already provides the token ids self.response_token_ids = response_template if not self.mlm and self.instruction_template and self.tokenizer.pad_token_id == self.tokenizer.eos_token_id: warnings.warn( "The pad_token_id and eos_token_id values of this tokenizer are identical. " "If you are planning for multi-turn training, " "it can result in the model continuously generating questions and answers without eos token. " "To avoid this, set the pad_token_id to a different value." ) self.ignore_index = ignore_index def torch_call(self, examples: List[Union[List[int], Any, Dict[str, Any]]]) -> Dict[str, Any]: batch = super().torch_call(examples) if self.instruction_template is None: for i in range(len(examples)): response_token_ids_start_idx = None for idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]: # `response_token_ids` is `'### Response:\n'`, here we are just making sure that the token IDs match if ( self.response_token_ids == batch["labels"][i][idx : idx + len(self.response_token_ids)].tolist() ): response_token_ids_start_idx = idx if response_token_ids_start_idx is None: warnings.warn( f"Could not find response key `{self.response_template}` in the " f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' f"This instance will be ignored in loss calculation. " f"Note, if this happens often, consider increasing the `max_seq_length`." ) batch["labels"][i, :] = self.ignore_index else: response_token_ids_end_idx = response_token_ids_start_idx + len(self.response_token_ids) # Make pytorch loss function ignore all tokens up through the end of the response key batch["labels"][i, :response_token_ids_end_idx] = self.ignore_index else: for i in range(len(examples)): response_token_ids_idxs = [] human_token_ids_idxs = [] for assistant_idx in np.where(batch["labels"][i] == self.response_token_ids[0])[0]: # find the indexes of the start of a response. if ( self.response_token_ids == batch["labels"][i][assistant_idx : assistant_idx + len(self.response_token_ids)].tolist() ): response_token_ids_idxs.append(assistant_idx + len(self.response_token_ids)) if len(response_token_ids_idxs) == 0: warnings.warn( f"Could not find response key `{self.response_template}` in the " f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' f"This instance will be ignored in loss calculation. " f"Note, if this happens often, consider increasing the `max_seq_length`." 
) batch["labels"][i, :] = self.ignore_index human_token_ids = self.instruction_token_ids for human_idx in np.where(batch["labels"][i] == human_token_ids[0])[0]: # find the indexes of the start of a human answer. if human_token_ids == batch["labels"][i][human_idx : human_idx + len(human_token_ids)].tolist(): human_token_ids_idxs.append(human_idx) if len(human_token_ids_idxs) == 0: warnings.warn( f"Could not find instruction key `{self.instruction_template}` in the " f'following instance: {self.tokenizer.decode(batch["input_ids"][i])} ' f"This instance will be ignored in loss calculation. " f"Note, if this happens often, consider increasing the `max_seq_length`." ) batch["labels"][i, :] = self.ignore_index if ( len(human_token_ids_idxs) > 0 and len(response_token_ids_idxs) > 0 and human_token_ids_idxs[0] > response_token_ids_idxs[0] ): human_token_ids_idxs = [0] + human_token_ids_idxs for idx, (start, end) in enumerate(zip(human_token_ids_idxs, response_token_ids_idxs)): # Make pytorch loss function ignore all non response tokens if idx != 0: batch["labels"][i, start:end] = self.ignore_index else: batch["labels"][i, :end] = self.ignore_index if len(response_token_ids_idxs) < len(human_token_ids_idxs): batch["labels"][i, human_token_ids_idxs[-1] :] = self.ignore_index return batch @dataclass class RewardDataCollatorWithPadding: r""" Reward DataCollator class that pads the inputs to the maximum length of the batch. Args: tokenizer (`PreTrainedTokenizerBase`): The tokenizer used for encoding the data. padding (`Union[bool, str, `PaddingStrategy`]`, `optional`, defaults to `True`): padding_strategy to pass to the tokenizer. max_length (`Optional[int]`, `optional`, defaults to `None`): The maximum length of the sequence to be processed. pad_to_multiple_of (`Optional[int]`, `optional`, defaults to `None`): If set will pad the sequence to a multiple of the provided value. return_tensors (`str`, `optional`, defaults to `"pt"`): The tensor type to use. """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None return_tensors: str = "pt" def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: features_chosen = [] features_rejected = [] margin = [] # check if we have a margin. 
If we do, we need to batch it as well has_margin = "margin" in features[0] for feature in features: # check if the keys are named as expected if ( "input_ids_chosen" not in feature or "input_ids_rejected" not in feature or "attention_mask_chosen" not in feature or "attention_mask_rejected" not in feature ): raise ValueError( "The features should include `input_ids_chosen`, `attention_mask_chosen`, `input_ids_rejected` and `attention_mask_rejected`" ) features_chosen.append( { "input_ids": feature["input_ids_chosen"], "attention_mask": feature["attention_mask_chosen"], } ) features_rejected.append( { "input_ids": feature["input_ids_rejected"], "attention_mask": feature["attention_mask_rejected"], } ) if has_margin: margin.append(feature["margin"]) batch_chosen = self.tokenizer.pad( features_chosen, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch_rejected = self.tokenizer.pad( features_rejected, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=self.return_tensors, ) batch = { "input_ids_chosen": batch_chosen["input_ids"], "attention_mask_chosen": batch_chosen["attention_mask"], "input_ids_rejected": batch_rejected["input_ids"], "attention_mask_rejected": batch_rejected["attention_mask"], "return_loss": True, } if has_margin: margin = torch.tensor(margin, dtype=torch.float) batch["margin"] = margin return batch @dataclass class DPODataCollatorWithPadding: r""" DPO DataCollator class that pads the tokenized inputs to the maximum length of the batch. Args: pad_token_id (`int` defaults to 0): The tokenizer's pad_token_id. label_pad_token_id (`int`, defaults to -100): The label used for masking. is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`): Whether or not you model has an encoder_decoder architecture. """ pad_token_id: int = 0 label_pad_token_id: int = -100 is_encoder_decoder: Optional[bool] = False def __call__(self, features: List[Dict[str, Any]]) -> Dict[str, Any]: # first, pad everything to the same length padded_batch = {} for k in features[0].keys(): if k.endswith("_input_ids") or k.endswith("_attention_mask") or k.endswith("_labels"): if self.is_encoder_decoder: to_pad = [torch.LongTensor(ex[k]) for ex in features] if (k.startswith("prompt")) and (k.endswith("input_ids")): if self.pad_token_id is None: raise ValueError( "Padding is enabled, but the tokenizer is not configured with a padding token." " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" " before calling the trainer." ) padding_value = self.pad_token_id elif k.endswith("_attention_mask"): padding_value = 0 elif k.startswith(("chosen", "rejected", "completion")) or ("decoder" in k): padding_value = self.label_pad_token_id else: raise ValueError(f"Unexpected key in batch '{k}'") padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) else: # adapted from https://stackoverflow.com/questions/73256206 if "prompt" in k: to_pad = [torch.LongTensor(ex[k][::-1]) for ex in features] else: to_pad = [torch.LongTensor(ex[k]) for ex in features] if k.endswith("_input_ids"): if self.pad_token_id is None: raise ValueError( "Padding is enabled, but the tokenizer is not configured with a padding token." " Explicitly set `tokenizer.pad_token` (e.g. `tokenizer.pad_token = tokenizer.eos_token`)" " before calling the trainer." 
) padding_value = self.pad_token_id elif k.endswith("_labels"): padding_value = self.label_pad_token_id elif k.endswith("_attention_mask"): padding_value = 0 else: raise ValueError(f"Unexpected key in batch '{k}'") padded_batch[k] = pad_sequence(to_pad, batch_first=True, padding_value=padding_value) # for the prompt, flip back so padding is on left side if "prompt" in k: padded_batch[k] = padded_batch[k].flip(dims=[1]) elif k.endswith("_logps"): # the cached reference model logprobs padded_batch[k] = torch.tensor([ex[k] for ex in features]) else: padded_batch[k] = [ex[k] for ex in features] return padded_batch class ConstantLengthDataset(IterableDataset): """ Iterable dataset that returns constant length chunks of tokens from stream of text files. The dataset also formats the text before tokenization with a specific format that is provided by the user. Args: tokenizer (`transformers.PreTrainedTokenizer`): The processor used for processing the data. dataset (`dataset.Dataset`): Dataset with text files. dataset_text_field (`str`, **optional**): Name of the field in the dataset that contains the text. Used only if `formatting_func` is `None`. formatting_func (`Callable`, **optional**): Function that formats the text before tokenization. Usually it is recommended to have follows a certain pattern such as `"### Question: {question} ### Answer: {answer}"` infinite (`bool`, *optional*, defaults to `False`): If True the iterator is reset after dataset reaches end else stops. seq_length (`int`, *optional*, defaults to `1024`): Length of token sequences to return. num_of_sequences (`int`, *optional*, defaults to `1024`): Number of token sequences to keep in buffer. chars_per_token (`int`, *optional*, defaults to `3.6`): Number of characters per token used to estimate number of tokens in text buffer. eos_token_id (`int`, *optional*, defaults to `0`): Id of the end of sequence token if the passed tokenizer does not have an EOS token. shuffle ('bool', *optional*, defaults to True) Shuffle the examples before they are returned append_concat_token ('bool', *optional*, defaults to True) If true, appends `eos_token_id` at the end of each sample being packed. add_special_tokens ('bool', *optional*, defaults to True) If true, tokenizers adds special tokens to each sample being packed. """ def __init__( self, tokenizer, dataset, dataset_text_field=None, formatting_func=None, infinite=False, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6, eos_token_id=0, shuffle=True, append_concat_token=True, add_special_tokens=True, ): self.tokenizer = tokenizer if tokenizer.eos_token_id is None: warnings.warn( "The passed tokenizer does not have an EOS token. We will use the passed eos_token_id instead which corresponds" f" to {eos_token_id}. If this is not the correct EOS token, make sure to pass the correct eos_token_id." ) self.concat_token_id = tokenizer.eos_token_id if tokenizer.eos_token_id else eos_token_id self.dataset = dataset self.seq_length = seq_length self.infinite = infinite self.current_size = 0 self.max_buffer_size = seq_length * chars_per_token * num_of_sequences self.shuffle = shuffle self.append_concat_token = append_concat_token self.add_special_tokens = add_special_tokens if formatting_func is None: self.formatting_func = lambda x: x[dataset_text_field] else: self.formatting_func = formatting_func if formatting_func is not None: if formatting_func.__code__.co_argcount > 1: warnings.warn( "The passed formatting_func has more than one argument. 
Usually that function should have a single argument `example`" " which corresponds to the dictionary returned by each element of the dataset. Make sure you know what you are doing." ) def __len__(self): return len(self.dataset) def __iter__(self): iterator = iter(self.dataset) more_examples = True while more_examples: buffer, buffer_len = [], 0 while True: if buffer_len >= self.max_buffer_size: break try: buffer.append(self.formatting_func(next(iterator))) buffer_len += len(buffer[-1]) except StopIteration: if self.infinite: iterator = iter(self.dataset) warnings.warn("The dataset reached end and the iterator is reset to the start.") else: more_examples = False break tokenized_inputs = self.tokenizer(buffer, add_special_tokens=self.add_special_tokens, truncation=False)[ "input_ids" ] all_token_ids = [] for tokenized_input in tokenized_inputs: if self.append_concat_token: tokenized_input = tokenized_input + [self.concat_token_id] all_token_ids.extend(tokenized_input) examples = [] for i in range(0, len(all_token_ids), self.seq_length): input_ids = all_token_ids[i : i + self.seq_length] if len(input_ids) == self.seq_length: examples.append(input_ids) if self.shuffle: random.shuffle(examples) for example in examples: self.current_size += 1 yield { "input_ids": torch.LongTensor(example), "labels": torch.LongTensor(example), } class RunningMoments: def __init__(self, accelerator): """ Calculates the running mean and standard deviation of a data stream. Reference: https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L75 """ self.mean = 0 self.std = 1 self.var = 1 self.count = 1e-24 self.accelerator = accelerator @torch.no_grad() def update(self, xs: torch.Tensor) -> Tuple[float, float]: """ Updates running moments from batch's moments computed across ranks """ if self.accelerator.use_distributed: xs_mean, xs_var, xs_count = get_global_statistics(self.accelerator, xs) else: xs_count = xs.numel() xs_var, xs_mean = torch.var_mean(xs, unbiased=False) xs_mean, xs_var = xs_mean.float(), xs_var.float() delta = xs_mean - self.mean tot_count = self.count + xs_count new_sum = xs_var * xs_count # correct old_sum deviation accounting for the new mean old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count tot_sum = old_sum + new_sum self.mean += delta * xs_count / tot_count self.var = tot_sum / tot_count self.std = (self.var * tot_count / (tot_count - 1)).float().sqrt() self.count = tot_count return xs_mean.item(), (xs_var * xs_count / (xs_count - 1)).float().sqrt().item() @torch.no_grad() def get_global_statistics(accelerator, xs: torch.Tensor, mask=None, device="cpu") -> Tuple[float, float, int]: """ Computes element-wise mean and variance of the tensor across processes. Reference: https://github.com/OpenLMLab/MOSS-RLHF/blob/40b91eb2f2b71b16919addede0341d2bef70825d/utils.py#L57C1-L73C75 """ xs = xs.to(accelerator.device) sum_and_count = torch.tensor([xs.sum(), (xs.numel() if mask is None else mask.sum())], device=xs.device) sum_and_count = accelerator.reduce(sum_and_count) global_sum, count = sum_and_count global_mean = global_sum / count sum_var = torch.sum(((xs - global_mean) ** 2).mul(1 if mask is None else mask)) sum_var = accelerator.reduce(sum_var) global_var = sum_var / count return global_mean.to(device), global_var.to(device), count.to(device) def compute_accuracy(eval_pred) -> Dict[str, float]: predictions, labels = eval_pred # Here, predictions is rewards_chosen and rewards_rejected. 
# We want to see how much of the time rewards_chosen > rewards_rejected. if np.array(predictions[:, 0] == predictions[:, 1], dtype=float).sum() > 0: warnings.warn( f"There are {np.array(predictions[:, 0] == predictions[:, 1]).sum()} out of {len(predictions[:, 0])} instances where the predictions for both options are equal. As a consequence the accuracy can be misleading." ) predictions = np.argmax(predictions, axis=1) accuracy = np.array(predictions == labels, dtype=float).mean().item() return {"accuracy": accuracy} def pad_to_length(tensor: torch.Tensor, length: int, pad_value: Union[int, float], dim: int = -1) -> torch.Tensor: if tensor.size(dim) >= length: return tensor else: pad_size = list(tensor.shape) pad_size[dim] = length - tensor.size(dim) return torch.cat( [ tensor, pad_value * torch.ones(*pad_size, dtype=tensor.dtype, device=tensor.device), ], dim=dim, ) def disable_dropout_in_model(model: torch.nn.Module) -> None: for module in model.modules(): if isinstance(module, torch.nn.Dropout): module.p = 0 def exact_div(a, b, a_str, b_str, custom_error_message=""): q = a // b if a != q * b: raise ValueError(f"{custom_error_message}, {a_str}={a}, {b_str}={b}, inexact division: {a} / {b} = {a / b}") return q # copied from https://github.com/kvablack/ddpo-pytorch/blob/main/ddpo_pytorch/stat_tracking.py#L5 class PerPromptStatTracker: r""" Class for tracking statistics per prompt. Mainly used to calculate advantage for the DPPO algorithm Args: buffer_size (`int`): Size of the buffer to keep for each prompt. min_count (`int`): Minimum number of samples to keep in the buffer before calculating the mean and std. """ def __init__(self, buffer_size, min_count): self.buffer_size = buffer_size self.min_count = min_count self.stats = {} def update(self, prompts, rewards): prompts = np.array(prompts) rewards = np.array(rewards) unique = np.unique(prompts) advantages = np.empty_like(rewards) for prompt in unique: prompt_rewards = rewards[prompts == prompt] if prompt not in self.stats: self.stats[prompt] = deque(maxlen=self.buffer_size) self.stats[prompt].extend(prompt_rewards) if len(self.stats[prompt]) < self.min_count: mean = np.mean(rewards) std = np.std(rewards) + 1e-6 else: mean = np.mean(self.stats[prompt]) std = np.std(self.stats[prompt]) + 1e-6 advantages[prompts == prompt] = (prompt_rewards - mean) / std return advantages def get_stats(self): return {k: {"mean": np.mean(v), "std": np.std(v), "count": len(v)} for k, v in self.stats.items()} def neftune_post_forward_hook(module, input, output): """ Implements the NEFTune forward pass for the model using forward hooks. Note this works only for torch.nn.Embedding layers. This method is slightly adapted from the original source code that can be found here: https://github.com/neelsjain/NEFTune Simply add it to your model as follows: ```python model = ... model.embed_tokens.neftune_noise_alpha = 0.1 model.embed_tokens.register_forward_hook(neftune_post_forward_hook) ``` Args: module (`torch.nn.Module`): The embedding module where the hook is attached. Note that you need to set `module.neftune_noise_alpha` to the desired noise alpha value. input (`torch.Tensor`): The input tensor to the model. output (`torch.Tensor`): The output tensor of the model (i.e. the embeddings). 
""" if module.training: dims = torch.tensor(output.size(1) * output.size(2)) mag_norm = module.neftune_noise_alpha / torch.sqrt(dims) output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm) return output def peft_module_casting_to_bf16(model): from peft.tuners.tuners_utils import BaseTunerLayer for name, module in model.named_modules(): if isinstance(module, BaseTunerLayer): module = module.to(torch.bfloat16) elif isinstance(module, torch.nn.LayerNorm) or "norm" in name: module = module.to(torch.float32) elif any(x in name for x in ["lm_head", "embed_tokens", "wte", "wpe"]): if hasattr(module, "weight"): if module.weight.dtype == torch.float32: module = module.to(torch.bfloat16) def trl_sanitze_kwargs_for_tagging(model, tag_names, kwargs=None): if is_unsloth_available(): # Unsloth adds a new attribute in the model config `unsloth_version` # to keep track of models that have been patched with unsloth. if hasattr(model, "config") and getattr(model.config, "unsloth_version", None) is not None: tag_names.append("unsloth") if kwargs is not None: if "tags" not in kwargs: kwargs["tags"] = tag_names elif "tags" in kwargs and isinstance(kwargs["tags"], list): kwargs["tags"].extend(tag_names) elif "tags" in kwargs and isinstance(kwargs["tags"], str): tag_names.append(kwargs["tags"]) kwargs["tags"] = tag_names return kwargs def get_quantization_config(model_config: ModelConfig) -> Optional[BitsAndBytesConfig]: if model_config.load_in_4bit: quantization_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_compute_dtype=model_config.torch_dtype, # For consistency with model weights, we use the same value as `torch_dtype` bnb_4bit_quant_type=model_config.bnb_4bit_quant_type, bnb_4bit_use_double_quant=model_config.use_bnb_nested_quant, ) elif model_config.load_in_8bit: quantization_config = BitsAndBytesConfig( load_in_8bit=True, ) else: quantization_config = None return quantization_config def get_kbit_device_map() -> Optional[Dict[str, int]]: if is_xpu_available(): return {"": f"xpu:{PartialState().local_process_index}"} elif torch.cuda.is_available(): return {"": PartialState().local_process_index} else: return None def get_peft_config(model_config: ModelConfig) -> "Optional[PeftConfig]": if model_config.use_peft is False: return None if not is_peft_available(): raise ValueError( "You need to have PEFT library installed in your environment, make sure to install `peft`. " "Make sure to run `pip install -U peft`." ) peft_config = LoraConfig( r=model_config.lora_r, lora_alpha=model_config.lora_alpha, lora_dropout=model_config.lora_dropout, bias="none", task_type=model_config.lora_task_type, target_modules=model_config.lora_target_modules, modules_to_save=model_config.lora_modules_to_save, ) return peft_config
trl/trl/trainer/utils.py/0
{ "file_path": "trl/trl/trainer/utils.py", "repo_id": "trl", "token_count": 14652 }
395