# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


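# Builds a deliberately tiny BertGenerationConfig plus random inputs so the model checks
# below stay fast; the sizes here do not correspond to any released checkpoint.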
class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

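    # Decoder checks additionally need something to cross-attend to: random "encoder"
    # hidden states and a random 0/1 mask standing in for a real encoder's outputs.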
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        **kwargs,
    ):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

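    # Verifies that decoding with a key/value cache (past_key_values) yields the same
    # hidden states as re-running the full extended sequence without a cache.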
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append them to input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

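    # With labels supplied, BertGenerationDecoder computes an LM loss and per-token
    # logits over the vocabulary; the check below only asserts on the logits shape.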
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        *args,
    ):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

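    # Entry point consumed by the generic ModelTesterMixin tests.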
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


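# Most coverage comes from the shared mixins (ModelTesterMixin, GenerationTesterMixin,
# PipelineTesterMixin), driven by the class mappings declared below.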
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


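# The integration tests below load the full pretrained checkpoint and compare a small
# slice of the output against previously recorded reference values.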
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))