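# Tests for the TensorFlow ConvBERT model family (TFConvBertModel and its task heads).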
from __future__ import annotations |

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

|
if is_tf_available(): |
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    """Builds a small ConvBERT config and random inputs shared by the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=384,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        embedding_size=128,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        scope=None,
    ):
        # Store the arguments instead of re-hardcoding the defaults, so callers
        # can actually override them.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.scope = scope

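    # Builds a ConvBertConfig plus random ids, masks, and labels for a single test case.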
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

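    # The base model should accept dict, list, and bare-tensor input formats alike.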
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

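    # Multiple-choice heads expect inputs tiled to (batch_size, num_choices, seq_length).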
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


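# Runs the shared model and pipeline test suites, plus ConvBERT-specific checks below.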
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

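    # Round-trips every model class through a TF SavedModel export/reload and checks
    # that hidden-state and attention outputs keep their expected shapes.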
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                # head_ratio=2 halves the effective number of attention heads.
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

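    # Overrides the common attention-shape test: ConvBERT's head_ratio of 2 halves the
    # number of attention heads visible in the returned attention maps.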
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads // 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            # Request attentions through the inputs dict.
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Requesting attentions through the config should work as well.
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # With hidden states also enabled, exactly one extra output (two for
            # encoder-decoder models) is added on top of the attention-only outputs.
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)


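# Slow integration test against the pretrained YituTech/conv-bert-base checkpoint.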
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)